diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 358d210c1..f74892b1d 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,6 +1,7 @@ [bumpversion] -current_version = 3.0.4 +current_version = 11.1.0 commit = True +message = Bump version: {current_version} → {new_version} [skip ci] [bumpversion:file:ibm_watson/version.py] search = __version__ = '{current_version}' @@ -9,4 +10,3 @@ replace = __version__ = '{new_version}' [bumpversion:file:setup.py] search = __version__ = '{current_version}' replace = __version__ = '{new_version}' - diff --git a/.env.enc b/.env.enc index ad9be380f..80fb09046 100644 Binary files a/.env.enc and b/.env.enc differ diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..c046c47f5 --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at ehdsouza27@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. 
The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index f99646058..a6d58b0b9 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -7,7 +7,7 @@ assignees: '' --- -Remember, an issue is not the place to ask questions. You can use [Stack Overflow](http://stackoverflow.com/questions/tagged/ibm-watson) for that, or you may want to start a discussion on the [dW Answers](https://developer.ibm.com/answers/questions/ask/?topics=watson). +Remember, an issue is not the place to ask questions. If you have issues with the APIs or have a question about the Watson services, see [Stack Overflow](https://stackoverflow.com/questions/tagged/ibm-watson+python). Before you open an issue, please check if a similar issue already exists or has been closed before. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index c86f075d1..d326a686d 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -7,7 +7,7 @@ assignees: '' --- -Remember, an issue is not the place to ask questions. You can use [Stack Overflow](http://stackoverflow.com/questions/tagged/ibm-watson) for that, or you may want to start a discussion on the [dW Answers](https://developer.ibm.com/answers/questions/ask/?topics=watson). +Remember, an issue is not the place to ask questions. If you have issues with the APIs or have a question about the Watson services, see [Stack Overflow](https://stackoverflow.com/questions/tagged/ibm-watson+python). Before you open an issue, please check if a similar issue already exists or has been closed before. diff --git a/.github/issue_template.md b/.github/issue_template.md deleted file mode 100644 index c789fbb17..000000000 --- a/.github/issue_template.md +++ /dev/null @@ -1,12 +0,0 @@ -#### Expected behavior - -#### Actual behavior - -#### Steps to reproduce the problem - -#### Code snippet (Note: Do not paste your credentials) - -#### python sdk version - -#### python version - diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml new file mode 100644 index 000000000..c5175d316 --- /dev/null +++ b/.github/workflows/build-test.yml @@ -0,0 +1,58 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support documentation. 
+# This workflow will do a clean install of python dependencies, build the source code and run tests across different versions of python +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Build and Test + +on: + push: + branches: [ '**' ] + pull_request: + branches: [ master ] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + build_test: + name: Build on Python ${{ matrix.python-version }} using ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + python-version: ['3.11', '3.12', '3.13'] + os: [ubuntu-latest] + + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies (ubuntu) + if: matrix.os == 'ubuntu-latest' + run: | + pip3 install -r requirements.txt + pip3 install -r requirements-dev.txt + pip3 install --editable . + - name: Execute Python 3.11 unit tests + if: matrix.python-version == '3.11' + run: | + pip3 install -U python-dotenv + py.test test/unit + - name: Execute Python 3.12 unit tests (ubuntu) + if: matrix.python-version == '3.12' && matrix.os == 'ubuntu-latest' + run: | + pip3 install -U python-dotenv + py.test test/unit --reruns 3 --cov=ibm_watson + - name: Execute Python 3.13 unit tests (ubuntu) + if: matrix.python-version == '3.13' && matrix.os == 'ubuntu-latest' + run: | + pip3 install -U python-dotenv + py.test test/unit --reruns 3 + - name: Upload coverage to Codecov + if: matrix.python-version == '3.12' && matrix.os == 'ubuntu-latest' + uses: codecov/codecov-action@v1 + with: + name: py${{ matrix.python-version }}-${{ matrix.os }} diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 000000000..a99ab6f9b --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,104 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support documentation. +# This workflow will download a prebuilt Python version, install dependencies, build and deploy/publish a new release +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Deploy and Publish + +on: + workflow_run: + workflows: ["Build and Test"] + branches: [ master ] + types: + - completed + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# default: least privileged permissions across all jobs +permissions: + contents: read + +jobs: + release: + runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == 'success' }} + concurrency: + group: ${{ github.workflow }}-release-${{ github.ref_name }} + cancel-in-progress: false + + permissions: + contents: write + + steps: + # Note: We checkout the repository at the branch that triggered the workflow. + # Python Semantic Release will automatically convert shallow clones to full clones + # if needed to ensure proper history evaluation. However, we forcefully reset the + # branch to the workflow sha because it is possible that the branch was updated + # while the workflow was running, which prevents accidentally releasing un-evaluated + # changes. 
+ - name: Setup | Checkout Repository on Release Branch + uses: actions/checkout@v6 + with: + ref: ${{ github.ref_name }} + + - name: Setup | Force release branch to be at workflow sha + run: | + git reset --hard ${{ github.sha }} + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: "3.13" + + - name: Action | Semantic Version Release + id: release + # Adjust tag with desired version if applicable. + uses: python-semantic-release/python-semantic-release@v10.5.3 + with: + github_token: ${{ secrets.GH_TOKEN }} + git_committer_name: "Watson Github Bot" + git_committer_email: "watdevex@us.ibm.com" + + - name: Build a binary wheel and a source tarball + run: pip3 install setuptools wheel twine build && python setup.py sdist + + - name: Publish | Upload to GitHub Release Assets + uses: python-semantic-release/publish-action@v10.5.3 + if: steps.release.outputs.released == 'true' + with: + github_token: ${{ secrets.GH_TOKEN }} + tag: ${{ steps.release.outputs.tag }} + + - name: Upload | Distribution Artifacts + uses: actions/upload-artifact@v5 + with: + name: distribution-artifacts + path: dist/ + if-no-files-found: error + + outputs: + released: ${{ steps.release.outputs.released || 'false' }} + + deploy: + # 1. Separate out the deploy step from the publish step to run each step at + # the least amount of token privilege + # 2. Also, deployments can fail, and its better to have a separate job if you need to retry + # and it won't require reversing the release. + runs-on: ubuntu-latest + needs: release + if: ${{ needs.release.outputs.released == 'true' }} + + permissions: + contents: read + id-token: write # IMPORTANT: mandatory for trusted publishing + + steps: + - name: Download all the dists + uses: actions/download-artifact@v6 + with: + name: distribution-artifacts + path: dist/ + + - name: Publish distribution 📦 to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml new file mode 100644 index 000000000..d95c423b1 --- /dev/null +++ b/.github/workflows/integration-test.yml @@ -0,0 +1,78 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support documentation. +# This workflow will download a prebuilt Python version, install dependencies and run integration tests + +name: Run Integration Tests + +on: + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + integration_test: + name: Build and Run Integration Tests on Python ${{ matrix.python-version }} and ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + python-version: ["3.11"] + os: [ubuntu-latest] + + steps: + - uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python dependencies (ubuntu) + run: | + pip3 install -r requirements.txt + pip3 install -r requirements-dev.txt + pip3 install --editable . 
+ + - name: Execute Python integration tests + # continue-on-error: true + env: + NATURAL_LANGUAGE_UNDERSTANDING_APIKEY: ${{ secrets.NLU_APIKEY }} + NATURAL_LANGUAGE_UNDERSTANDING_URL: "https://api.us-south.natural-language-understanding.watson.cloud.ibm.com" + SPEECH_TO_TEXT_APIKEY: ${{ secrets.STT_APIKEY }} + SPEECH_TO_TEXT_URL: "https://api.us-south.speech-to-text.watson.cloud.ibm.com" + TEXT_TO_SPEECH_APIKEY: ${{ secrets.TTS_APIKEY }} + TEXT_TO_SPEECH_URL: "https://api.us-south.text-to-speech.watson.cloud.ibm.com" + ASSISTANT_APIKEY: ${{ secrets.WA_APIKEY }} + ASSISTANT_WORKSPACE_ID: ${{ secrets.WA_WORKSPACE_ID }} + ASSISTANT_ASSISTANT_ID: ${{ secrets.WA_ASSISTANT_ID }} + ASSISTANT_URL: "https://api.us-south.assistant.watson.cloud.ibm.com" + DISCOVERY_V2_APIKEY: ${{ secrets.D2_APIKEY }} + DISCOVERY_V2_PROJECT_ID: ${{ secrets.D2_PROJECT_ID }} + DISCOVERY_V2_COLLECTION_ID: ${{ secrets.D2_COLLECTION_ID }} + DISCOVERY_V2_URL: "https://api.us-south.discovery.watson.cloud.ibm.com" + run: | + pip3 install -U python-dotenv + pytest test/integration/test_discovery_v2.py -rap + pytest test/integration/test_natural_language_understanding_v1.py -rap + pytest test/integration/test_speech_to_text_v1.py -rap + pytest test/integration/test_text_to_speech_v1.py -rap + + # Do not notify on success. We will leave the code here just in case we decide to switch gears + - name: Notify slack on success + if: false # success() + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_NOTIFICATIONS_BOT_TOKEN }} + uses: voxmedia/github-action-slack-notify-build@v1 + with: + channel: watson-e2e-tests + status: SUCCESS + color: good + + - name: Notify slack on failure + if: false # failure() + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_NOTIFICATIONS_BOT_TOKEN }} + uses: voxmedia/github-action-slack-notify-build@v1 + with: + channel: watson-e2e-tests + status: FAILED + color: danger diff --git a/.gitignore b/.gitignore index 5d029b821..54f100b3d 100644 --- a/.gitignore +++ b/.gitignore @@ -48,6 +48,7 @@ coverage.xml # virtual env venv/ +.venv/ # python 3 virtual env python3/ @@ -66,6 +67,8 @@ test/__init__.py .sfdx/tools/apex.db .pytest_cache/ -# ignore detect secrets files +# ignore pre-commit config file .pre-commit-config.yaml -.secrets.baseline + +.openapi-generator-ignore +.openapi-generator/ \ No newline at end of file diff --git a/.pylintrc b/.pylintrc index cb3aab63c..b6d2de7b9 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,4 +1,4 @@ # lint Python modules using external checkers. 
[MASTER] ignore=SVN -disable=R0903,R0912,R0913,R0914,R0915,W0141,C0111,C0103,W0603,W0703,R0911,C0301,C0302,R0902,R0904,W0142,W0212,E1101,E1103,R0201,W0201,W0122,W0232,RP0001,RP0003,RP0101,RP0002,RP0401,RP0701,RP0801,F0401,E0611,R0801,I0011,F0401,E0611,E1004,C0111,I0011,I0012,W0704,W0142,W0212,W0232,W0613,W0702,R0201,W0614,R0914,R0912,R0915,R0913,R0904,R0801,C0301,C0411,R0204,W0622,E1121,inconsistent-return-statements +disable=R0903,R0912,R0913,R0914,R0915,W0141,C0111,C0103,W0603,W0703,R0911,C0301,C0302,R0902,R0904,W0142,W0212,E1101,E1103,R0201,W0201,W0122,W0232,RP0001,RP0003,RP0101,RP0002,RP0401,RP0701,RP0801,F0401,E0611,R0801,I0011,F0401,E0611,E1004,C0111,I0011,I0012,W0704,W0142,W0212,W0232,W0613,W0702,R0201,W0614,R0914,R0912,R0915,R0913,R0904,R0801,C0301,C0411,R0204,W0622,E1121,inconsistent-return-statements,R0205,C0325,unsubscriptable-object diff --git a/.releaserc b/.releaserc index 69775b042..1de8456e5 100644 --- a/.releaserc +++ b/.releaserc @@ -1,15 +1,15 @@ { - "branch": "master", - "verifyConditions": [], - "prepare": [ - { - "path": "@semantic-release/exec", - "cmd": "bumpversion --current-version ${lastRelease.version} --new-version ${nextRelease.version} patch" - } - ], - "publish": [ - { - "path": "@semantic-release/github", - } + "debug": true, + "plugins": [ + "@semantic-release/commit-analyzer", + "@semantic-release/release-notes-generator", + "@semantic-release/changelog", + [ + "@semantic-release/exec", + { + "prepareCmd": "bumpversion --allow-dirty --current-version ${lastRelease.version} --new-version ${nextRelease.version} patch" + } + ], + "@semantic-release/github" ] -} \ No newline at end of file +} diff --git a/.secrets.baseline b/.secrets.baseline new file mode 100644 index 000000000..171a5e191 --- /dev/null +++ b/.secrets.baseline @@ -0,0 +1,222 @@ +{ + "exclude": { + "files": "package-lock.json|^.secrets.baseline$", + "lines": null + }, + "generated_at": "2024-02-26T19:01:03Z", + "plugins_used": [ + { + "name": "AWSKeyDetector" + }, + { + "name": "ArtifactoryDetector" + }, + { + "base64_limit": 4.5, + "name": "Base64HighEntropyString" + }, + { + "name": "BasicAuthDetector" + }, + { + "name": "BoxDetector" + }, + { + "name": "CloudantDetector" + }, + { + "ghe_instance": "github.ibm.com", + "name": "GheDetector" + }, + { + "hex_limit": 3, + "name": "HexHighEntropyString" + }, + { + "name": "IbmCloudIamDetector" + }, + { + "name": "IbmCosHmacDetector" + }, + { + "name": "JwtTokenDetector" + }, + { + "keyword_exclude": null, + "name": "KeywordDetector" + }, + { + "name": "MailchimpDetector" + }, + { + "name": "PrivateKeyDetector" + }, + { + "name": "SlackDetector" + }, + { + "name": "SoftlayerDetector" + }, + { + "name": "StripeDetector" + }, + { + "name": "TwilioKeyDetector" + } + ], + "results": { + "README.md": [ + { + "hashed_secret": "d9e9019d9eb455a3d72a3bc252c26927bb148a10", + "is_secret": false, + "is_verified": false, + "line_number": 118, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "32e8612d8ca77c7ea8374aa7918db8e5df9252ed", + "is_secret": false, + "is_verified": false, + "line_number": 162, + "type": "Secret Keyword", + "verified_result": null + } + ], + "docs/generate_index_html.sh": [ + { + "hashed_secret": "973f71aa51bf4dcef6aa10f52089747a85c64a73", + "is_secret": false, + "is_verified": false, + "line_number": 12, + "type": "Base64 High Entropy String", + "verified_result": null + } + ], + "ibm_watson/discovery_v1.py": [ + { + "hashed_secret": "e8fc807ce6fbcda13f91c5b64850173873de0cdc", + "is_secret": false, + 
"is_verified": false, + "line_number": 5683, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "fdee05598fdd57ff8e9ae29e92c25a04f2c52fa6", + "is_secret": false, + "is_verified": false, + "line_number": 5684, + "type": "Secret Keyword", + "verified_result": null + } + ], + "resources/dummy-storage-credentials.json": [ + { + "hashed_secret": "1b9863aec116b7c1c537f8100173aba52d7384d7", + "is_secret": false, + "is_verified": false, + "line_number": 2, + "type": "Secret Keyword", + "verified_result": null + } + ], + "test/integration/test_discovery_v1.py": [ + { + "hashed_secret": "b60d121b438a380c343d5ec3c2037564b82ffef3", + "is_secret": false, + "is_verified": false, + "line_number": 168, + "type": "Secret Keyword", + "verified_result": null + } + ], + "test/unit/test_assistant_v1.py": [ + { + "hashed_secret": "d506bd5213c46bd49e16c634754ad70113408252", + "is_secret": false, + "is_verified": false, + "line_number": 7986, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "b8473b86d4c2072ca9b08bd28e373e8253e865c4", + "is_secret": false, + "is_verified": false, + "line_number": 11590, + "type": "Secret Keyword", + "verified_result": null + } + ], + "test/unit/test_assistant_v2.py": [ + { + "hashed_secret": "d506bd5213c46bd49e16c634754ad70113408252", + "is_secret": false, + "is_verified": false, + "line_number": 1393, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "b8473b86d4c2072ca9b08bd28e373e8253e865c4", + "is_secret": false, + "is_verified": false, + "line_number": 8441, + "type": "Secret Keyword", + "verified_result": null + } + ], + "test/unit/test_discovery_v1.py": [ + { + "hashed_secret": "8318df9ecda039deac9868adf1944a29a95c7114", + "is_secret": false, + "is_verified": false, + "line_number": 7053, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "b8473b86d4c2072ca9b08bd28e373e8253e865c4", + "is_secret": false, + "is_verified": false, + "line_number": 8245, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "b8e758b5ad59a72f146fcf065239d5c7b695a39a", + "is_secret": false, + "is_verified": false, + "line_number": 10479, + "type": "Hex High Entropy String", + "verified_result": null + } + ], + "test/unit/test_discovery_v2.py": [ + { + "hashed_secret": "b8473b86d4c2072ca9b08bd28e373e8253e865c4", + "is_secret": false, + "is_verified": false, + "line_number": 6882, + "type": "Secret Keyword", + "verified_result": null + } + ], + "test/unit/test_speech_to_text_v1.py": [ + { + "hashed_secret": "b8473b86d4c2072ca9b08bd28e373e8253e865c4", + "is_secret": false, + "is_verified": false, + "line_number": 432, + "type": "Secret Keyword", + "verified_result": null + } + ] + }, + "version": "0.13.1+ibm.56.dss", + "word_list": { + "file": null, + "hash": null + } +} diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index d3235e10b..000000000 --- a/.travis.yml +++ /dev/null @@ -1,48 +0,0 @@ -language: python -matrix: - include: - - python: 2.7 - - python: 3.5 - - python: 3.6 - - python: 3.7 - dist: xenial -cache: pip -before_install: -- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && openssl aes-256-cbc -K $encrypted_cebf25e6c525_key - -iv $encrypted_cebf25e6c525_iv -in .env.enc -out .env -d || true' -- npm install npm@latest -g -install: -- pip install tox-travis -- pip install bumpversion -- npm install @semantic-release/exec -script: -- pip install -U python-dotenv -- tox -before_deploy: -- sudo apt-get update -- pip install -r 
requirements.txt -- pip install -r requirements-dev.txt -- pip install pypandoc -- sudo apt-get install pandoc -- pip install --editable . -deploy: -- provider: script - script: docs/publish.sh - skip_cleanup: true - on: - python: '3.5' - tags: true -- provider: script - script: npx semantic-release - skip_cleanup: true - on: - python: '3.5' - branch: master -- provider: pypi - user: watson-devex - password: $PYPI_PASSWORD - repository: https://upload.pypi.org/legacy - skip_cleanup: true - on: - python: '3.5' - tags: true diff --git a/CHANGELOG.md b/CHANGELOG.md index f7e2525c2..d3d27edf0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1 +1,418 @@ +# [9.0.0](https://github.com/watson-developer-cloud/python-sdk/compare/v8.1.0...v9.0.0) (2024-12-04) + + +### Features + +* **discov2:** add functions for new batches api ([043eed4](https://github.com/watson-developer-cloud/python-sdk/commit/043eed48f1808ad3c0c325be18e2bd7ecc339c14)) +* **stt:** add new speech models ([4948b8f](https://github.com/watson-developer-cloud/python-sdk/commit/4948b8f210e5b9cd2d856aa90f2262a8bdf64444)) +* **stt:** readd interimResults and lowLatency wss params ([ffc67b8](https://github.com/watson-developer-cloud/python-sdk/commit/ffc67b8a0b213530cda23157848d79b5fea4b146)) +* **WxA:** add new functions and update required params ([3fe6243](https://github.com/watson-developer-cloud/python-sdk/commit/3fe62430c57e660b0903b0988fa3c53c489012d3)) +* Add support for message streaming and new APIs + +New functions: create_providers, list_providers, update_providers, create_release_export, download_release_export, create_release_import, get_release_import_status, message_stream, message_stream_stateless, parse_sse_stream_data, list_batches, pull_batches, push_batches + +### BREAKING CHANGES + +* **WxA:** `environmentId` now required for `message` and `messageStateless` functions +* **lt:** LanguageTranslator functionality has been removed +* **discov1:** DiscoveryV1 functionality has been removed +* **nlu:** training_data_content_type default changed to None + +# [8.1.0](https://github.com/watson-developer-cloud/python-sdk/compare/v8.0.0...v8.1.0) (2024-05-17) + + +### Features + +* **stt:** remove interim_results and low_latency wss params ([035b29d](https://github.com/watson-developer-cloud/python-sdk/commit/035b29d82c35789f782359a9842e50956665b96c)) +* **stt:** add speech_begin_event param to recognize func ([d026ab2](https://github.com/watson-developer-cloud/python-sdk/commit/d026ab2a7ffa950a7ba6b655357f2523cda337ef)) +* **disco-v2:** add ocr_enabled parameter ([460593f](https://github.com/watson-developer-cloud/python-sdk/commit/460593f48fe7e32ea3fc205da05d1dad7318255b)) + +# [8.0.0](https://github.com/watson-developer-cloud/python-sdk/compare/v7.0.1...v8.0.0) (2024-02-26) + + +### Features + +* **disco-v2:** class changes ([a109e2e](https://github.com/watson-developer-cloud/python-sdk/commit/a109e2e3f43442fdc0d0c7c09bdf3ccd0682628e)) +* **disco-v2:** new params for EnrichmentOptions ([d980178](https://github.com/watson-developer-cloud/python-sdk/commit/d980178de2ffbf9ffd491113a9a5fd1f82ed4557)) +* **nlu:** add support for userMetadata param ([134fa6d](https://github.com/watson-developer-cloud/python-sdk/commit/134fa6d868396875a33806d1e688156ceecd60c5)) +* **stt:** new params smart_formatting_version, force, mapping_only ([0fa495c](https://github.com/watson-developer-cloud/python-sdk/commit/0fa495cf24438d7a937904735f1dd23e33f3cd31)) +* **wa-v2:** new params orchestration and asyncCallout 
([69523c5](https://github.com/watson-developer-cloud/python-sdk/commit/69523c5f023717ff911b714e2a58571f19b51b04)) +* **wa-v2:** support for private variables ([6cd5eba](https://github.com/watson-developer-cloud/python-sdk/commit/6cd5ebae52f93ab64f89bb1dea52b3ef4b27f444)) + + +### BREAKING CHANGES + +* **wa-v2:** Renaming and changing of multiple interfaces + +## [7.0.1](https://github.com/watson-developer-cloud/python-sdk/compare/v7.0.0...v7.0.1) (2022-08-07) + + +### Bug Fixes + +* **tts,stt,version:** unpinned websocket-client ([75432a6](https://github.com/watson-developer-cloud/python-sdk/commit/75432a6ab4b737a3a7afd8009e70f68e6f02d312)) + +# [7.0.0](https://github.com/watson-developer-cloud/python-sdk/compare/v6.0.1...v7.0.0) (2023-03-17) + + +### Bug Fixes + +* **assistantv2:** use original createSession method signature ([ac82c45](https://github.com/watson-developer-cloud/python-sdk/commit/ac82c45c14ddcd0d608496d1193da09d555b6f15)) +* **nlu:** require training_data_content_type ([d91f007](https://github.com/watson-developer-cloud/python-sdk/commit/d91f007fafd568cc30abf15d54c53935f32197a8)) +* **version:** change version strings for release ([aee877c](https://github.com/watson-developer-cloud/python-sdk/commit/aee877ce8ae50f495f1dacfd7cbd26a117aab594)) + + +### Features + +* **assistant-v1:** update models and add new methods ([fbcebd0](https://github.com/watson-developer-cloud/python-sdk/commit/fbcebd088c205070e9bae22821b2a2e8920a07c5)) +* **assistant-v2:** update models and add new methods ([a1586ec](https://github.com/watson-developer-cloud/python-sdk/commit/a1586ec6750e5130493fa8d08ac01d13a36e3715)) +* **assistantv2:** add several new functions ([d2d6fbf](https://github.com/watson-developer-cloud/python-sdk/commit/d2d6fbfce304bdb197b665e612022d4c4cc6b5bd)) +* **assistantv2:** improved typing ([a84cd6c](https://github.com/watson-developer-cloud/python-sdk/commit/a84cd6c983d913811b7943e579126e6a1c71781f)) +* **discov2:** new aggregation types ([41cb185](https://github.com/watson-developer-cloud/python-sdk/commit/41cb1853267528dcedfd49f42710ff28e6885d37)) +* **discovery-v2:** update models and add several new methods ([972a1ae](https://github.com/watson-developer-cloud/python-sdk/commit/972a1ae6f774a4849ffc6e8fe1a77e04090a7441)) +* **nlu:** add trainingParameters ([c8e056c](https://github.com/watson-developer-cloud/python-sdk/commit/c8e056c8d503656271bde6315b84838771975179)) +* **nlu:** remove all sentimentModel functions ([d6e342f](https://github.com/watson-developer-cloud/python-sdk/commit/d6e342f7fc34fdc82cf6042f585d3110bd38abfd)) +* **nlu:** remove beta model param from Sentiment ([1469190](https://github.com/watson-developer-cloud/python-sdk/commit/1469190590cdaff60156816964b88822fef5e933)) +* **release:** trigger release ([c08a117](https://github.com/watson-developer-cloud/python-sdk/commit/c08a117294c9d2a52b8493c1cec55b8826621abc)) +* **stt, tts:** add more models ([8b9f6a8](https://github.com/watson-developer-cloud/python-sdk/commit/8b9f6a897e2e9d3fdb43aa0ce1adc8b2a581f4e9)) +* **stt:** add and remove models ([14fd5f2](https://github.com/watson-developer-cloud/python-sdk/commit/14fd5f22096ac83e99a5c6092fbead23cf309f45)) +* **stt:** update parameters ([e40c06c](https://github.com/watson-developer-cloud/python-sdk/commit/e40c06c52ec00168d9a5f7f0e174c8a1fef65d21)) +* **tts:** add parameters ([b300c55](https://github.com/watson-developer-cloud/python-sdk/commit/b300c5527794eee5ab692a51eb858164dddfef93)) +* **tts:** add params and add model constants 
([546796d](https://github.com/watson-developer-cloud/python-sdk/commit/546796d3db37f4af52a7745a62f24e769094b567)) +* **wss:** add and remove websocket params ([1b5f171](https://github.com/watson-developer-cloud/python-sdk/commit/1b5f1715ad92573bc8fce2e44ba8b6e5efda3780)) + + +### BREAKING CHANGES + +* **release:** trigger release +* **assistantv2:** createSession param removed +* **assistantv2:** removing and changing of classes +* **discov2:** confidence property removed +* **discov2:** smartDocumentUnderstanding param removed +* **discov2:** QueryAggregation structure changed +* **nlu:** remove all sentimentModel functions and models + +# [6.1.0](https://github.com/watson-developer-cloud/python-sdk/compare/v6.0.1...v6.1.0) (2022-08-10) + + +### Bug Fixes + +* **assistantv2:** use original createSession method signature ([ac82c45](https://github.com/watson-developer-cloud/python-sdk/commit/ac82c45c14ddcd0d608496d1193da09d555b6f15)) + + +### Features + +* **assistant-v1:** update models and add new methods ([fbcebd0](https://github.com/watson-developer-cloud/python-sdk/commit/fbcebd088c205070e9bae22821b2a2e8920a07c5)) +* **assistant-v2:** update models and add new methods ([a1586ec](https://github.com/watson-developer-cloud/python-sdk/commit/a1586ec6750e5130493fa8d08ac01d13a36e3715)) +* **discovery-v2:** update models and add several new methods ([972a1ae](https://github.com/watson-developer-cloud/python-sdk/commit/972a1ae6f774a4849ffc6e8fe1a77e04090a7441)) +* **nlu:** add trainingParameters ([c8e056c](https://github.com/watson-developer-cloud/python-sdk/commit/c8e056c8d503656271bde6315b84838771975179)) +* **stt:** update parameters ([e40c06c](https://github.com/watson-developer-cloud/python-sdk/commit/e40c06c52ec00168d9a5f7f0e174c8a1fef65d21)) +* **tts:** add parameters ([b300c55](https://github.com/watson-developer-cloud/python-sdk/commit/b300c5527794eee5ab692a51eb858164dddfef93)) +* **wss:** add and remove websocket params ([1b5f171](https://github.com/watson-developer-cloud/python-sdk/commit/1b5f1715ad92573bc8fce2e44ba8b6e5efda3780)) + +# [6.0.0](https://github.com/watson-developer-cloud/python-sdk/compare/v5.3.0...v6.0.0) (2022-03-21) + + +### Bug Fixes + +* **ws:** remove websocket debug code ([21399b7](https://github.com/watson-developer-cloud/python-sdk/commit/21399b769608a25f00fe4790b850ced77a8fc748)) + + +* Major release 2022 (#816) ([97de097](https://github.com/watson-developer-cloud/python-sdk/commit/97de097b8c86622ab2f30f5386bb74321d28addf)), closes [#816](https://github.com/watson-developer-cloud/python-sdk/issues/816) + + +### BREAKING CHANGES + +* OutputData: required text property removed, RuntimeEntity: optional metadata property removed +RuntimeResponseGeneric: Three new response types added +Workspace: workspaceID changed form required to optional + +* feat(assistantv2): add three new response types, rename model, remove properties +* RuntimeEntity: optional metadata property removed, MessageOutputDebug: nodesVisited type DialogNodesVisited changed to DialogNodeVisited. 
+MessageContext: integrations property added +MessageContextGlobalSystem: skipUserInput property added +MessageContextStateless: integrations property added +MessageInput: attachments property added +MessageInputStateless: attachments property added +RuntimeResponseGeneric: Three new response types added + +* refactor(cc): remove compare and comply ヾ(・‿・) + +* refactor(nlc): remove nlc ヾ(・‿・) + +* feat(nlu): remove MetadataOptions model + +* refactor(lt): comment change and test updates + +* refactor(pi): remove personality insights ヾ(・‿・) + +* feat(stt/tts): add new property and comment changes + +* refactor(ta/visrec): remove ta and visrec ヾ(・‿・) + +* refactor(all): remove remaining traces of removed services + +* feat(assistantv1): add new dialogNode models and additional properties for Workspace + +* feat(discov1): update QueryAggregation subclasses +* QueryAggregation: QueryAggregation subclasses changed. +DocumentStatus: documentID, status, and statusDescription are now optional + +* feat(stt): change grammarFile property type +* addGrammar parameter grammarFile changed from String to Data type + +SupportedFeatures: customAcousticModel property added + +* chore: copyright changes + +* build(secrets): upload detect-secrets baseline + +* docs(readme): add deprecation note and remove old references + +* ci(version): remove python 3.6 support and add 3.9 support + +## [5.3.1](https://github.com/watson-developer-cloud/python-sdk/compare/v5.3.0...v5.3.1) (2022-01-26) + + +### Bug Fixes + +* **ws:** remove websocket debug code ([21399b7](https://github.com/watson-developer-cloud/python-sdk/commit/21399b769608a25f00fe4790b850ced77a8fc748)) + +# [5.3.0](https://github.com/watson-developer-cloud/python-sdk/compare/v5.2.3...v5.3.0) (2021-09-14) + + +### Bug Fixes + +* **disco_v1:** update type of status to reflect service changes ([ab880f0](https://github.com/watson-developer-cloud/python-sdk/commit/ab880f04ae2ec5983e74175cd7b1dc83bd24f022)) +* **disco_v2:** project types enum updated/fixed ([a598231](https://github.com/watson-developer-cloud/python-sdk/commit/a598231df416e8cbf4453342b42ce5a5d8c6be85)) +* **nlu:** fix listClassificationsModels response model ([9954e59](https://github.com/watson-developer-cloud/python-sdk/commit/9954e59df889b45b761ef206ad8dedd9502e762a)) +* **wss:** fix on_transcription parsing issue including tests ([1b05e1b](https://github.com/watson-developer-cloud/python-sdk/commit/1b05e1b3169b8c904fd17c3834d4e61779fa511c)) + + +### Features + +* **assistant_v1:** add alt_text and sensitivity options, location now optional ([0a6c540](https://github.com/watson-developer-cloud/python-sdk/commit/0a6c540f4a5d6abf25e35f9b8b20d6d191744a43)) +* **assistant_v2:** same as v1, add more properties ([c2ca53b](https://github.com/watson-developer-cloud/python-sdk/commit/c2ca53bf1bdc55790787d926ffa23f1cc12b3cee)) +* **assistant_v2,disco_v1:** add answers property to response model, fix typo ([611b7c9](https://github.com/watson-developer-cloud/python-sdk/commit/611b7c91644fb2238712cfed964ea7376b0d076a)) +* **stt&tts:** new models added ([67ee967](https://github.com/watson-developer-cloud/python-sdk/commit/67ee967726c022b9e883bef786cb82a59b86be28)) + + +## [5.2.3](https://github.com/watson-developer-cloud/python-sdk/compare/v5.2.2...v5.2.3) (2021-08-26) + + +### Bug Fixes + +* **nlc:** add deprecation warning ([d1ec209](https://github.com/watson-developer-cloud/python-sdk/commit/d1ec209484320c2a61c735721148a66f58e6f7b1)), closes 
[#9624](https://github.com/watson-developer-cloud/python-sdk/issues/9624) +* **nlc:** move deprecation warning ([09a6dd4](https://github.com/watson-developer-cloud/python-sdk/commit/09a6dd4d7b26664cb92d8652f8d54ca96d9404a9)) +* **nlc:** move deprecation warning ([3658ee8](https://github.com/watson-developer-cloud/python-sdk/commit/3658ee856c3ddba77a64589631b4605ae3c8c86c)) + +## [5.2.2](https://github.com/watson-developer-cloud/python-sdk/compare/v5.2.1...v5.2.2) (2021-07-06) + + +### Bug Fixes + +* robustify the STT streaming results handling ([#768](https://github.com/watson-developer-cloud/python-sdk/issues/768)) ([264807d](https://github.com/watson-developer-cloud/python-sdk/commit/264807d7eb3287bbca56328496a477e042a9b2ca)) + +## [5.2.1](https://github.com/watson-developer-cloud/python-sdk/compare/v5.2.0...v5.2.1) (2021-06-28) + + +### Bug Fixes + +* **tts:** remove origin header from websocket request ([e151e82](https://github.com/watson-developer-cloud/python-sdk/commit/e151e8271f4ddc2d0217beec8e84896e0e44539f)) + +# [5.2.0](https://github.com/watson-developer-cloud/python-sdk/compare/v5.1.0...v5.2.0) (2021-06-10) + + +### Bug Fixes + +* **compare-comply:** add deprecation notice for CC ([d006937](https://github.com/watson-developer-cloud/python-sdk/commit/d0069376dcd1e23076c12130feeea1b20010de06)) +* **lt:** fix character encoding for non latin langs ([900bd79](https://github.com/watson-developer-cloud/python-sdk/commit/900bd79bbeb7a28b82aa47d2e3345837f62fc306)) +* **nlu:** remove ListCategoriesModelsResponse ([c09022f](https://github.com/watson-developer-cloud/python-sdk/commit/c09022f8841104262351447ac5dccba6ef159805)) +* **tts:** remove extraneous filename param ([d6f9c5d](https://github.com/watson-developer-cloud/python-sdk/commit/d6f9c5d4b75bf303f363dfac68180351d7bcbc26)) + + +### Features + +* **assistantv1:** generation release changes ([ac146a1](https://github.com/watson-developer-cloud/python-sdk/commit/ac146a1efc5e148a636b17540399898c74222a29)) +* **assistantv2:** generation release changes ([d33caba](https://github.com/watson-developer-cloud/python-sdk/commit/d33caba954e764e4b512e4fce62750c2fd9b8cf1)) +* **discov2:** generation release changes ([0ea3ac0](https://github.com/watson-developer-cloud/python-sdk/commit/0ea3ac0ce1024138097e1777f8ecde5b45eaa92d)) +* **nlu:** generation release changes ([e5e71b6](https://github.com/watson-developer-cloud/python-sdk/commit/e5e71b655bc3d7477fd4244df28651fa91a82cd4)) +* **stt-tts:** generation release changes ([0600855](https://github.com/watson-developer-cloud/python-sdk/commit/06008553bdae0d2c0768d41a1d1878523e5bd810)) + +# [5.1.0](https://github.com/watson-developer-cloud/python-sdk/compare/v5.0.2...v5.1.0) (2021-01-12) + + +### Features + +* upate core to use 3.3.6 ([83d5f6a](https://github.com/watson-developer-cloud/python-sdk/commit/83d5f6a7ad4c69fc3c2dbecc6cadee0dc69eeadf)) + +## [5.0.2](https://github.com/watson-developer-cloud/python-sdk/compare/v5.0.1...v5.0.2) (2020-12-28) + + +### Bug Fixes + +* lock JWT version to 1.7.1 ([8fcdfc6](https://github.com/watson-developer-cloud/python-sdk/commit/8fcdfc64db1bcf6a230e8be80de6dfcad8e8811f)) + +## [5.0.1](https://github.com/watson-developer-cloud/python-sdk/compare/v5.0.0...v5.0.1) (2020-12-22) + + +### Bug Fixes + +* **Assistant:** node dialog response should have agent props ([53e532e](https://github.com/watson-developer-cloud/python-sdk/commit/53e532e04ab141d93e54f040965dbca993186543)) + +# [5.0.0](https://github.com/watson-developer-cloud/python-sdk/compare/v4.7.1...v5.0.0) 
(2020-12-11) + + +### Features + +* regenerate services using latest api def and generator ([59e7ded](https://github.com/watson-developer-cloud/python-sdk/commit/59e7dede81f530ea027b480fd007f9df67180ab1)) +* regenerate using current api def and add deprecation warnings ([c3e1f07](https://github.com/watson-developer-cloud/python-sdk/commit/c3e1f07697b15d05f87cec39e36a9a2d28db7b91)) +* regenerate with current API and add deprecation warnings ([4faa938](https://github.com/watson-developer-cloud/python-sdk/commit/4faa9380a606eeb8e8794b918b0f72313e4b1d86)) +* regenrate language translator ([8fdebc4](https://github.com/watson-developer-cloud/python-sdk/commit/8fdebc45f0dfd1044d848969cb5cb3b8cb15a313)) +* regenrate services using current api def and generator ([e84a0cb](https://github.com/watson-developer-cloud/python-sdk/commit/e84a0cb0636cd0add767903391de150bd65a4cd2)) +* regenrate using current api def and generator 3.21 ([33e0d93](https://github.com/watson-developer-cloud/python-sdk/commit/33e0d9356ac43b7f988200c853b46b6cf4f703ab)) +* **AssistantV1:** add support for bulkClassify ([e17b24c](https://github.com/watson-developer-cloud/python-sdk/commit/e17b24cc565bf6ee603497aeb5c11436ee09b0dc)) +* **AssistantV2:** add support for bulkClassify ([8b14dda](https://github.com/watson-developer-cloud/python-sdk/commit/8b14dda82de980f09a031b1e15ab53573a5b55d8)) +* **CompareComply:** remove before and after from list feedback ([5af17b7](https://github.com/watson-developer-cloud/python-sdk/commit/5af17b7557b2bd3f178c17b7ba7de907c0a3045e)) +* **TextToSpeechV1:** change voice model signaturess to custom models ([12ee072](https://github.com/watson-developer-cloud/python-sdk/commit/12ee072189d54a7b6462c82cb0e5d4d123822ea5)) +* **VisRecV4:** change start time and end time to date from string ([f2f40e7](https://github.com/watson-developer-cloud/python-sdk/commit/f2f40e7a6e9aa90f3938d576081998cb5667a0f8)) + + +### BREAKING CHANGES + +* **VisRecV4:** change start and end time for training usage to date time format +* **CompareComply:** remove before and after from list feedback +* **TextToSpeechV1:** This update breaks the users using any methods of type _voice_models + +## [4.7.1](https://github.com/watson-developer-cloud/python-sdk/compare/v4.7.0...v4.7.1) (2020-09-03) + + +### Bug Fixes + +* lock the cloud sdk library version ([18d5997](https://github.com/watson-developer-cloud/python-sdk/commit/18d5997faa44af4e3c11b217f598dd4e3c75115c)) + +# [4.7.0](https://github.com/watson-developer-cloud/python-sdk/compare/v4.6.0...v4.7.0) (2020-09-03) + + +### Features + +* **DiscoveryV2:** add support for analyze document ([6353f53](https://github.com/watson-developer-cloud/python-sdk/commit/6353f53361f0c1998b746308a7713f1c0dbc172d)) + +# [4.6.0](https://github.com/watson-developer-cloud/python-sdk/compare/v4.5.0...v4.6.0) (2020-08-25) + + +### Features + +* generate unit tests using current api defs ([99555f0](https://github.com/watson-developer-cloud/python-sdk/commit/99555f017a08fa42457fbebdaf4d03a854482683)) +* regenrate all services using current api def ([9ef3c6e](https://github.com/watson-developer-cloud/python-sdk/commit/9ef3c6e2df323a2bb7403bef417ddbd34ca6b462)) +* **AssistantV2:** add support for list logs and delete user data ([6b87f9b](https://github.com/watson-developer-cloud/python-sdk/commit/6b87f9bc834f9b23e62a1d7047e8024839a50e36)) +* **discoV2:** add new apis for enrichments, collections and projects 
([4388ea2](https://github.com/watson-developer-cloud/python-sdk/commit/4388ea276b5473b13249592127a51e2004a1d82c)) +* **languageTranslatorV3:** add support for list languages ([de83e96](https://github.com/watson-developer-cloud/python-sdk/commit/de83e96e5d4b0a9f2221fc48ca38ef66b0a0c68d)) + +# [4.5.0](https://github.com/watson-developer-cloud/python-sdk/compare/v4.4.1...v4.5.0) (2020-06-04) + + +### Features + +* regenerate services based on current API def ([c538bd2](https://github.com/watson-developer-cloud/python-sdk/commit/c538bd23b28c220cec2261e9d2a770ebedbba860)) +* **AssistantV1:** add support for spelling suggestions ([858af78](https://github.com/watson-developer-cloud/python-sdk/commit/858af780c60edcff5e6c47281f23d3d9c5011861)) +* **AssistantV2:** add support for stateless messages ([c57f248](https://github.com/watson-developer-cloud/python-sdk/commit/c57f248ea920c12bb439b5571ae78fcce144707b)) +* **VisualRecognitionV4:** add support for downloading a model file ([fa2cd1b](https://github.com/watson-developer-cloud/python-sdk/commit/fa2cd1b8e8c0a867e6c509875418d8c32e9e4d06)) + +## [4.4.1](https://github.com/watson-developer-cloud/python-sdk/compare/v4.4.0...v4.4.1) (2020-05-11) + + +### Bug Fixes + +* loading creds from top level directory ([03a3509](https://github.com/watson-developer-cloud/python-sdk/commit/03a3509f497dca9a534fc19cc59498ce80f2f51e)) + +# [4.4.0](https://github.com/watson-developer-cloud/python-sdk/compare/v4.3.0...v4.4.0) (2020-04-24) + + +### Features + +* **AssistantV2:** regenerate based on current API def ([46a812c](https://github.com/watson-developer-cloud/python-sdk/commit/46a812c6f74a9d32a96ad566e2d25d883c24d2f7)) +* regenerate services using current API def ([e9ea20c](https://github.com/watson-developer-cloud/python-sdk/commit/e9ea20cc68a09da4e948c0622e254c31b27b481b)) +* **LanguageTranslator:** add support for auto correct ([230878a](https://github.com/watson-developer-cloud/python-sdk/commit/230878a256d375c92cef0647e2f5efa51b8a5cf0)) +* **SpeechToText:** add support for speech_detector_sensitivity and background_audio_suppression in ([9aa13e9](https://github.com/watson-developer-cloud/python-sdk/commit/9aa13e94558c37ca815d61ff36d0988943c55bf7)) + +# [4.3.0](https://github.com/watson-developer-cloud/python-sdk/compare/v4.2.1...v4.3.0) (2020-02-13) + + +### Features + +* **assistantv1:** New param `include_audi` in `create_synonym` and `update_synonym` and `update_dialog_node ` ([fbe1081](https://github.com/watson-developer-cloud/python-sdk/commit/fbe1081309aaa380d47b3c9016aaedc0a7bb5005)) +* **assistantv1:** New param `include_audit` in `create_example`, `update_example`, `create_counterexample`, `update_counterexample`, `create_entity` ([b1f99ec](https://github.com/watson-developer-cloud/python-sdk/commit/b1f99ec1e4fad3655ff526cab7de5f18e29607a0)) +* **assistantv1:** New param `include_audit` in `create_intent` ([d523706](https://github.com/watson-developer-cloud/python-sdk/commit/d52370646cd9d5aa88bfd67da294dfd5f8ba9800)) +* **assistantv1:** New param `include_audit` in `create_value` ([4d32257](https://github.com/watson-developer-cloud/python-sdk/commit/4d32257464bd87886934c00f5a4e569896a4ac32)) +* **assistantv1:** New param `include_audit` in `create_workspace` and `update_workspace` ([e44cb16](https://github.com/watson-developer-cloud/python-sdk/commit/e44cb16771620f0cbc6f6c03e215c088c5a1beb6)) +* **assistantv1:** New params `append` and `include_audit` in `update_intent` 
([3b015f9](https://github.com/watson-developer-cloud/python-sdk/commit/3b015f9b660241c2ab6f6b7f371843edf2c12c59)) +* **assistantv1:** New params `audit` and `include_audit` in `update_value` ([8bac230](https://github.com/watson-developer-cloud/python-sdk/commit/8bac230a824d996f7807f943650c737ca3ae553d)) +* **assistantv1:** New params `include_audit` and `append` in `update_entity` ([e36783d](https://github.com/watson-developer-cloud/python-sdk/commit/e36783d015e7681b626a1cc8c99ee68a9cbf614f)) +* **assistantv1:** New params `interpretation` and `role` in `RuntimeEntity` model ([a44ace8](https://github.com/watson-developer-cloud/python-sdk/commit/a44ace8638db57f17d319932863aa5c5af51dd93)) +* **assistantv2:** `interpretation`, `alternatives` and `role` properties in `RuntimeEntity` ([5ef087f](https://github.com/watson-developer-cloud/python-sdk/commit/5ef087f4b27b0771e098aaec8488e42d94ecd1ce)) +* **assistantv2:** New params `locale` and `reference_time` in `MessageContextGlobalSystem` ([9b7e56e](https://github.com/watson-developer-cloud/python-sdk/commit/9b7e56e85d9fdec8b264c1a2865882c031046998)) +* **vr4:** New objects operations ([cc9eace](https://github.com/watson-developer-cloud/python-sdk/commit/cc9eaced7ac1e693392e0ea2e6eb2ed27c63af9a)) + +## [4.2.1](https://github.com/watson-developer-cloud/python-sdk/compare/v4.2.0...v4.2.1) (2020-01-17) + + +### Bug Fixes + +* **nlu:** Add model property back in CategoriesOptions ([6d5ed34](https://github.com/watson-developer-cloud/python-sdk/commit/6d5ed3404408a32daa90490daa9be24f53512998)) + +# [4.2.0](https://github.com/watson-developer-cloud/python-sdk/compare/v4.1.0...v4.2.0) (2020-01-16) + + +### Features + +* **core:** Update core version ([88106fb](https://github.com/watson-developer-cloud/python-sdk/commit/88106fb9c9460e60363a814565b51a512805f8b9)) +* **stt:** New param `end_of_phrase_silence_time` and `split_transcript_at_phrase_end` in `recognize` ([776dc86](https://github.com/watson-developer-cloud/python-sdk/commit/776dc8635a98489a9ceb8abf155947bb0f39ad8a)) +* **stt:** New param `end_of_phrase_silence_time` and `split_transcription` in recognize_using_websocket ([040946f](https://github.com/watson-developer-cloud/python-sdk/commit/040946f88d6b652f8b8e5638429b69fb1035e79a)) + +# [4.1.0](https://github.com/watson-developer-cloud/python-sdk/compare/v4.0.4...v4.1.0) (2019-11-27) + + +### Features + +* **assistantv1:** New param `new_disambiguation_opt_out ` in `create_dialog_node` ([5a5b840](https://github.com/watson-developer-cloud/python-sdk/commit/5a5b84076ff4b0d87355ed71cf7a2cbb9612c866)) +* **assistantv1:** New param `new_disambiguation_opt_out ` in `update_dialog_node() ` ([6e52e07](https://github.com/watson-developer-cloud/python-sdk/commit/6e52e07b3e3ab0a9bc2687406b8a98c5e5826e33)) +* **assistantv1:** New param `webhooks` in `create_workspace()` and `update_workspace()` ([0134b69](https://github.com/watson-developer-cloud/python-sdk/commit/0134b6981c09fc7132297aeb161eb75029bbd54d)) +* **assistantv1:** New properties `randomize` and `max_ssuggestions` in `WorkspaceSystemSettingsDisambiguation` ([27a8cd7](https://github.com/watson-developer-cloud/python-sdk/commit/27a8cd7173a48fb6aaf909598fc3eb34e1320fe4)) +* **assistantv1:** New property `off_topic` in `WorkspaceSystemSettings` ([5f93c55](https://github.com/watson-developer-cloud/python-sdk/commit/5f93c552828b539b846c9a44df4f69ed888d27b4)) +* **discoveryv1:** `title` property not part of `QueryNoticesResult` and `QueryResult` 
([2ce0ad3](https://github.com/watson-developer-cloud/python-sdk/commit/2ce0ad33c91714eb6d9b2adb7ac44ff70ad378e9)) +* **discoveryv2:** Add examples for discoveryv2 ([2b54527](https://github.com/watson-developer-cloud/python-sdk/commit/2b54527725438d229e4acd80dc31d0869bdaa464)) +* **discoveryv2:** New discovery v2 available on CP4D ([73df7e4](https://github.com/watson-developer-cloud/python-sdk/commit/73df7e4a53ef83ad1271b71215ab357f7a538177)) +* **VisualRecognitionv4:** New method `get_training_usage` ([a5bec46](https://github.com/watson-developer-cloud/python-sdk/commit/a5bec467005db9340f6983654c293c94587258d9)) + +## [4.0.4](https://github.com/watson-developer-cloud/python-sdk/compare/v4.0.3...v4.0.4) (2019-11-22) + + +### Bug Fixes + +* **semrelease:** Provide proper git message for semantic release ([88e2c08](https://github.com/watson-developer-cloud/python-sdk/commit/88e2c0806882693d175c5b8aedb1bf187223db79)) + +## [4.0.3](https://github.com/watson-developer-cloud/python-sdk/compare/v4.0.2...v4.0.3) (2019-11-20) + + +### Bug Fixes + +* **bumpversion:** Skip for bumpversion ([fd38d73](https://github.com/watson-developer-cloud/python-sdk/commit/fd38d7395daf3d28e8dd085b0a1c8e9d4358a1b5)) +* **semantic:** remove tag in bumpversion ([bb1a6a9](https://github.com/watson-developer-cloud/python-sdk/commit/bb1a6a93fcbc8ac13df45d78fc2b97b071267699)) +* **semrelease:** Reorder semantic release steps ([1a13a0c](https://github.com/watson-developer-cloud/python-sdk/commit/1a13a0c0bf8522b8ea10146d4daf9059f2595c35)) + +## [4.0.3](https://github.com/watson-developer-cloud/python-sdk/compare/v4.0.2...v4.0.3) (2019-11-20) + + +### Bug Fixes + +* **bumpversion:** Skip for bumpversion ([fd38d73](https://github.com/watson-developer-cloud/python-sdk/commit/fd38d7395daf3d28e8dd085b0a1c8e9d4358a1b5)) +* **semrelease:** Reorder semantic release steps ([1a13a0c](https://github.com/watson-developer-cloud/python-sdk/commit/1a13a0c0bf8522b8ea10146d4daf9059f2595c35)) + +## [4.0.2](https://github.com/watson-developer-cloud/python-sdk/compare/v4.0.1...v4.0.2) (2019-11-11) + + +### Bug Fixes + +* **semantic:** Fix semantic release stale commit ([f0eaafa](https://github.com/watson-developer-cloud/python-sdk/commit/f0eaafa2731b12847c9941687f0f91d73c43d94f)) + Moved to [https://github.com/watson-developer-cloud/python-sdk/wiki/Changelog](https://github.com/watson-developer-cloud/python-sdk/wiki/Changelog) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 60a21455b..3fbf576f1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,8 +2,7 @@ ## Questions -If you are having difficulties using the APIs or have a question about the IBM Watson Services, -please ask a question on [dW Answers][dw] or [Stack Overflow][stackoverflow]. +If you have issues with the APIs or have a question about the Watson services, see [Stack Overflow](https://stackoverflow.com/questions/tagged/ibm-watson+python). ## Issues @@ -17,14 +16,16 @@ If you want to contribute to the repository, here's a quick guide: 1. Fork the repository 1. Install `virtualenv` and `tox` 1. Develop and test your code changes with [pytest]. - * Respect the original code [style guide][styleguide]. - * Only use spaces for indentation. - * Create minimal diffs - disable on save actions like reformat source code or organize imports. If you feel the source code should be reformatted create a separate PR for this change. - * Check for unnecessary whitespace with `git diff --check` before committing. - * Make sure your code supports Python 2.7, 3.4, 3.5 and 3.6. 
You can use `pyenv` and `tox` for this + - Respect the original code [style guide][styleguide]. + - Only use spaces for indentation. + - Create minimal diffs - disable on save actions like reformat source code or organize imports. If you feel the source code should be reformatted create a separate PR for this change. + - Check for unnecessary whitespace with `git diff --check` before committing. + - Make sure your code supports Python 3.9, 3.10, 3.11. You can use `pyenv` and `tox` for this 1. Make the test pass 1. Commit your changes -* Commits should follow the [Angular commit message guidelines](https://github.com/angular/angular/blob/master/CONTRIBUTING.md#-commit-message-guidelines). This is because our release tool uses this format for determining release versions and generating changelogs. To make this easier, we recommend using the [Commitizen CLI](https://github.com/commitizen/cz-cli) with the `cz-conventional-changelog` adapter. + +- Commits should follow the [Angular commit message guidelines](https://github.com/angular/angular/blob/master/CONTRIBUTING.md#-commit-message-guidelines). This is because our release tool uses this format for determining release versions and generating changelogs. To make this easier, we recommend using the [Commitizen CLI](https://github.com/commitizen/cz-cli) with the `cz-conventional-changelog` adapter. + 1. Push to your fork and submit a pull request to the `dev` branch ## Running the tests @@ -32,26 +33,29 @@ If you want to contribute to the repository, here's a quick guide: You probably want to set up a [virtualenv]. 1. Clone this repository: - ```sh - git clone https://github.com/watson-developer-cloud/python-sdk.git - ``` + ```sh + git clone https://github.com/watson-developer-cloud/python-sdk.git + ``` 1. Install the sdk as an editable package using the current source: - ```sh - pip install --editable . - ``` + ```sh + pip install --editable . + ``` 1. Install the test dependencies with: - ```sh - pip install -r requirements-dev.txt - ``` + ```sh + pip install -r requirements-dev.txt + ``` 1. Run the test cases with: - ```sh - py.test test - ``` + ```sh + py.test test + ``` + +## Bug Bounty Hunters Notice +API keys found from commit bec3ae23b53782370851e28cbda5033a596b58b5 have already been revoked and will not be accepted for bug bounties. 
## Additional Resources -* [General GitHub documentation](https://help.github.com/) -* [GitHub pull request documentation](https://help.github.com/send-pull-requests/) +- [General GitHub documentation](https://help.github.com/) +- [GitHub pull request documentation](https://help.github.com/send-pull-requests/) [dw]: https://developer.ibm.com/answers/questions/ask/?topics=watson [stackoverflow]: http://stackoverflow.com/questions/ask?tags=ibm-watson diff --git a/MIGRATION-V7.md b/MIGRATION-V7.md new file mode 100644 index 000000000..765ac9ad7 --- /dev/null +++ b/MIGRATION-V7.md @@ -0,0 +1,55 @@ +# Upgrading to ibm-watson@8.0 + [Breaking Changes](#breaking-changes) + - [Breaking changes by service](#breaking-changes-by-service) + +- [New Features by Service](#new-features-by-service) + +### Breaking changes by service + +#### Assistant v2 +- Parameter `createSession` removed from `createSession` function +- Class `Environment` property `language` removed +- Class `EnvironmentReleaseReference` renamed to `BaseEnvironmentReleaseReference` +- Class `EnvironmentOrchestration` renamed to `BaseEnvironmentOrchestration` +- Class `SkillReference` renamed to `EnvironmentSkill` + +#### Discovery v2 +- Parameter `smartDocumentUnderstanding` removed from `createCollection` function +- Class `QueryResponsePassage` and `QueryResultPassage` property `confidence` removed +- Class `DocumentClassifierEnrichment` property `enrichmentId` is no longer an optional +- QueryAggregation classes restructured + +#### Natural Language Understanding +- All `sentiment_model` functions removed +- `create_classifications_model`, `update_classifications_model`, `create_categories_model`, and `update_categories_model` now require `training_data_content_type` + +#### Speech to Text +- `AR_AR_BROADBANDMODEL` model removed in favor of `AR_MS_BROADBANDMODEL` model + +### New Features by Service + +#### Assistant v2 +- `create_assistant` function +- `list_assistants` function +- `delete_assistant` function +- `update_environment` function +- `create_release` function +- `delete_release` function +- `get_skill` function +- `update_skill` function +- `export_skills` function +- `import_skills` function +- `import_skills_status` function +- Improved typing for `message` function call +See details of these functions on IBM's documentation site [here](https://cloud.ibm.com/apidocs/assistant-v2?code=python) + +#### Discovery v2 +- Aggregation types `QueryTopicAggregation` and `QueryTrendAggregation` added + +#### Speech to Text +- added `FR_CA_MULTIMEDIA`, `JA_JP_TELEPHONY`, `NL_NL_MULTIMEDIA`, `SV_SE_TELEPHONY` models + +#### Text to Speech +- added `EN_AU_HEIDIEXPRESSIVE`, `EN_AU_JACKEXPRESSIVE`, `EN_US_ALLISONEXPRESSIVE`, `EN_US_EMMAEXPRESSIVE`, `EN_US_LISAEXPRESSIVE`, `EN_US_MICHAELEXPRESSIVE`, `KO_KR_JINV3VOICE` +- Parameters `rate_percentage` and `pitch_percentage` added to `synthesize` function +See details of these new parameters on IBM's documentation site [here](https://cloud.ibm.com/apidocs/text-to-speech?code=python#synthesize) diff --git a/MIGRATION.md b/MIGRATION.md deleted file mode 100644 index 6bd5c5961..000000000 --- a/MIGRATION.md +++ /dev/null @@ -1 +0,0 @@ -Moved to [https://github.com/watson-developer-cloud/python-sdk/wiki/Migration](https://github.com/watson-developer-cloud/python-sdk/wiki/Migration) \ No newline at end of file diff --git a/README.md b/README.md index a4e2ceb33..e99a46d4a 100755 --- a/README.md +++ b/README.md @@ -1,43 +1,22 @@ # Watson Developer Cloud Python SDK -[![Build 
Status](https://travis-ci.org/watson-developer-cloud/python-sdk.svg?branch=master)](https://travis-ci.org/watson-developer-cloud/python-sdk) -[![Slack](https://wdc-slack-inviter.mybluemix.net/badge.svg)](https://wdc-slack-inviter.mybluemix.net) +[![Build and Test](https://github.com/watson-developer-cloud/python-sdk/workflows/Build%20and%20Test/badge.svg?branch=master)](https://github.com/watson-developer-cloud/python-sdk/actions?query=workflow%3A"Build+and+Test") +[![Deploy and Publish](https://github.com/watson-developer-cloud/python-sdk/workflows/Deploy%20and%20Publish/badge.svg?branch=master)](https://github.com/watson-developer-cloud/python-sdk/actions?query=workflow%3A%22Deploy+and+Publish%22) [![Latest Stable Version](https://img.shields.io/pypi/v/ibm-watson.svg)](https://pypi.python.org/pypi/ibm-watson) [![CLA assistant](https://cla-assistant.io/readme/badge/watson-developer-cloud/python-sdk)](https://cla-assistant.io/watson-developer-cloud/python-sdk) -Python client library to quickly get started with the various [Watson APIs][wdc] services. +## Deprecated builds + +[![Build Status](https://travis-ci.org/watson-developer-cloud/python-sdk.svg?branch=master)](https://travis-ci.org/watson-developer-cloud/python-sdk) -
- Table of Contents - - * [Before you begin](#before-you-begin) - * [Installation](#installation) - * [Examples](#examples) - * [Running in IBM Cloud](#running-in-ibm-cloud) - * [Authentication](#authentication) - * [Getting credentials](#getting-credentials) - * [IAM](#iam) - * [Username and password](#username-and-password) - * [Python version](#python-version) - * [Changes for v1.0](#changes-for-v10) - * [Changes for v2.0](#changes-for-v20) - * [Changes for v3.0](#changes-for-v30) - * [Migration](#migration) - * [Configuring the http client](#configuring-the-http-client-supported-from-v110) - * [Disable SSL certificate verification](#disable-ssl-certificate-verification) - * [Sending request headers](#sending-request-headers) - * [Parsing HTTP response info](#parsing-http-response-info) - * [Dependencies](#dependencies) - * [License](#license) - * [Contributing](#contributing) - * [Featured Projects](#featured-projects) - -
+Python client library to quickly get started with the various [Watson APIs][wdc] services. ## Before you begin -* You need an [IBM Cloud][ibm-cloud-onboarding] account. + +- You need an [IBM Cloud][ibm-cloud-onboarding] account. We now only support `python 3.5` and above ## Installation + To install, use `pip` or `easy_install`: ```bash @@ -66,9 +45,11 @@ sudo -H pip install --ignore-installed six ibm-watson For more details see [#225](https://github.com/watson-developer-cloud/python-sdk/issues/225) c) In case you run into problems installing the SDK in DSX, try + ``` !pip install --upgrade pip ``` + Restarting the kernel For more details see [#405](https://github.com/watson-developer-cloud/python-sdk/issues/405) @@ -88,9 +69,8 @@ Watson services are migrating to token-based Identity and Access Management (IAM - With some service instances, you authenticate to the API by using **[IAM](#iam)**. - In other instances, you authenticate by providing the **[username and password](#username-and-password)** for the service instance. -**Note:** Authenticating with the X-Watson-Authorization-Token header is deprecated. The token continues to work with Cloud Foundry services, but is not supported for services that use Identity and Access Management (IAM) authentication. See [here](#iam) for details. - ### Getting credentials + To find out which authentication to use, view the service credentials. You find the service credentials for authentication the same way for all Watson services: 1. Go to the IBM Cloud [Dashboard](https://cloud.ibm.com/) page. @@ -101,21 +81,21 @@ On this page, you should be able to see your credentials for accessing your serv ### Supplying credentials -There are two ways to supply the credentials you found above to the SDK for authentication. +There are three ways to supply the credentials you found above to the SDK for authentication. -#### Credential file (easier!) +#### Credential file With a credential file, you just need to put the file in the right place and the SDK will do the work of parsing and authenticating. You can get this file by clicking the **Download** button for the credentials in the **Manage** tab of your service instance. The file downloaded will be called `ibm-credentials.env`. This is the name the SDK will search for and **must** be preserved unless you want to configure the file path (more on that later). The SDK will look for your `ibm-credentials.env` file in the following places (in order): -- Your system's home directory - The top-level directory of the project you're using the SDK in +- Your system's home directory As long as you set that up correctly, you don't have to worry about setting any authentication options in your code. So, for example, if you created and downloaded the credential file for your Discovery instance, you just need to do the following: ```python -discovery = DiscoveryV1(version='2018-08-01') +assistant = AssistantV2(version='2024-08-25') ``` And that's it! @@ -130,158 +110,243 @@ export IBM_CREDENTIALS_FILE="" where `` is something like `/home/user/Downloads/.env`. +#### Environment Variables + +Simply set the environment variables using the `<SERVICE_NAME>_<PROPERTY>` syntax.
For example, using your favourite terminal, you can set environment variables for an Assistant service instance: + +```bash +export ASSISTANT_APIKEY="" +export ASSISTANT_AUTH_TYPE="iam" +``` + +The credentials will be loaded from the environment automatically. + +```python +assistant = AssistantV2(version='2024-08-25') +``` + #### Manually + If you'd prefer to set authentication values manually in your code, the SDK supports that as well. The way you'll do this depends on what type of credentials your service instance gives you. ### IAM IBM Cloud has migrated to token-based Identity and Access Management (IAM) authentication. IAM authentication uses a service API key to get an access token that is passed with the call. Access tokens are valid for approximately one hour and must be regenerated. -You supply either an IAM service **API key** or an **access token**: +You supply either an IAM service **API key** or a **bearer token**: - Use the API key to have the SDK manage the lifecycle of the access token. The SDK requests an access token, ensures that the access token is valid, and refreshes it if necessary. -- Use the access token if you want to manage the lifecycle yourself. For details, see [Authenticating with IAM tokens](https://cloud.ibm.com/docs/services/watson?topic=watson-iam). +- Use the access token if you want to manage the lifecycle yourself. For details, see [Authenticating with IAM tokens](https://cloud.ibm.com/docs/watson?topic=watson-iam). - Use a server-side component to generate access tokens using your IAM API key for untrusted environments like client-side scripts. The generated access tokens will be valid for one hour and can be refreshed. -### Generating access tokens using IAM API key +#### Supplying the API key + ```python -# In your API endpoint use this to generate new access tokens -iam_token_manager = IAMTokenManager(iam_apikey='') -token = iam_token_manager.get_token() +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator + +# In the constructor, letting the SDK manage the token +authenticator = IAMAuthenticator('apikey', + url='') # optional - the default value is https://iam.cloud.ibm.com/identity/token +assistant = AssistantV2(version='2024-08-25', + authenticator=authenticator) +assistant.set_service_url('') ``` -#### Supplying the IAM API key +#### Generating bearer tokens using API key ```python -# In the constructor, letting the SDK manage the IAM token -discovery = DiscoveryV1(version='2018-08-01', - url='', - apikey='', - iam_url='') # optional - the default value is https://iam.cloud.ibm.com/identity/token -``` +from ibm_watson import IAMTokenManager -```python -# after instantiation, letting the SDK manage the IAM token -discovery = DiscoveryV1(version='2018-08-01', url='') -discovery.set_apikey('') +# In your API endpoint use this to generate new bearer tokens +iam_token_manager = IAMTokenManager(apikey='') +token = iam_token_manager.get_token() ``` -#### Supplying the access token +##### Supplying the bearer token + ```python -# in the constructor, assuming control of managing IAM token -discovery = DiscoveryV1(version='2018-08-01', - url='', - iam_access_token='') +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import BearerTokenAuthenticator + +# in the constructor, assuming control of managing the token +authenticator = BearerTokenAuthenticator('your bearer token') +assistant = AssistantV2(version='2024-08-25', + authenticator=authenticator) +assistant.set_service_url('') ``` +#### Username and password
+ ```python -# after instantiation, assuming control of managing IAM token -discovery = DiscoveryV1(version='2018-08-01', url='') -discovery.set_iam_access_token('') +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import BasicAuthenticator + +authenticator = BasicAuthenticator('username', 'password') +assistant = AssistantV2(version='2024-08-25', authenticator=authenticator) +assistant.set_service_url('') ``` -### Username and password +#### No Authentication + ```python -from ibm_watson import DiscoveryV1 -# In the constructor -discovery = DiscoveryV1(version='2018-08-01', url='', username='', password='') +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import NoAuthAuthenticator + +authenticator = NoAuthAuthenticator() +assistant = AssistantV2(version='2024-08-25', authenticator=authenticator) +assistant.set_service_url('') ``` +### MCSP + +To use the SDK through a third-party cloud provider (such as AWS), use the `MCSPAuthenticator`. This will require the base endpoint URL for the MCSP token service (e.g. https://iam.platform.saas.ibm.com) and an API key. + ```python -# After instantiation -discovery = DiscoveryV1(version='2018-08-01', url='') -discovery.set_username_and_password('', '') +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import MCSPAuthenticator + +# In the constructor, letting the SDK manage the token +authenticator = MCSPAuthenticator('apikey', 'token_service_endpoint') +assistant = AssistantV2(version='2023-06-15', + authenticator=authenticator) +assistant.set_service_url('') ``` ## Python version -Tested on Python 2.7, 3.5, 3.6, and 3.7. +Tested on Python 3.9, 3.10, and 3.11. + +## Questions -## Changes for v1.0 -Version 1.0 focuses on the move to programmatically-generated code for many of the services. See the [changelog](https://github.com/watson-developer-cloud/python-sdk/wiki/Changelog) for the details. +If you have issues with the APIs or have a question about the Watson services, see [Stack Overflow](https://stackoverflow.com/questions/tagged/ibm-watson+python). + +## Configuring the http client + +To set client configs like timeout, use the `set_http_config()` function and pass it a dictionary of configs. See this [documentation](https://requests.readthedocs.io/en/latest/api/) for more information about the options. All options shown except `method`, `url`, `headers`, `params`, `data`, and `auth` are configurable via `set_http_config()`. For example, for an Assistant service instance: -## Changes for v2.0 -`DetailedResponse` which contains the result, headers and HTTP status code is now the default response for all methods.
```python -from ibm_watson import AssistantV1 +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator -assistant = AssistantV1( - username='xxx', - password='yyy', - url='', - version='2018-07-10') +authenticator = IAMAuthenticator('your apikey') +assistant = AssistantV2( + version='2024-08-25', + authenticator=authenticator) +assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com') -response = assistant.list_workspaces(headers={'Custom-Header': 'custom_value'}) -print(response.get_result()) -print(response.get_headers()) -print(response.get_status_code()) +assistant.set_http_config({'timeout': 100}) +response = assistant.message(workspace_id=workspace_id, input={ + 'text': 'What\'s the weather like?'}).get_result() +print(json.dumps(response, indent=2)) ``` -See the [changelog](https://github.com/watson-developer-cloud/python-sdk/wiki/Changelog) for the details. -## Changes for v3.0 -The SDK is generated using OpenAPI Specification(OAS3). Changes are basic reordering of parameters in function calls. + +### Use behind a corporate proxy + +To use the SDK behind any proxies you may have, set them as shown below. For documentation on proxies, see [here](https://2.python-requests.org/en/latest/user/advanced/#proxies). -The package is renamed to ibm_watson. See the [changelog](https://github.com/watson-developer-cloud/python-sdk/wiki/Changelog) for the details. +See this example configuration: -## Migration -This version includes many breaking changes as a result of standardizing behavior across the new generated services. Full details on migration from previous versions can be found [here](https://github.com/watson-developer-cloud/python-sdk/wiki/Migration). +```python +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator + +authenticator = IAMAuthenticator('your apikey') +assistant = AssistantV2( + version='2024-08-25', + authenticator=authenticator) +assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com') + +assistant.set_http_config({'proxies': { + 'http': 'http://10.10.1.10:3128', + 'https': 'http://10.10.1.10:1080', +}}) +``` -## Configuring the http client (Supported from v1.1.0) -To set client configs like timeout use the `with_http_config()` function and pass it a dictionary of configs. + +### Sending custom certificates + +To send custom certificates as a security measure in your request, use the `cert` option of the underlying HTTP client configuration. ```python -from ibm_watson import AssistantV1 +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator -assistant = AssistantV1( - username='xxx', - password='yyy', - url='', - version='2018-07-10') +authenticator = IAMAuthenticator('your apikey') +assistant = AssistantV2( + version='2024-08-25', + authenticator=authenticator) +assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com') -assistant.set_http_config({'timeout': 100}) -response = assistant.message(workspace_id=workspace_id, input={ - 'text': 'What\'s the weather like?'}).get_result() -print(json.dumps(response, indent=2)) +assistant.set_http_config({'cert': ('path_to_cert_file','path_to_key_file')}) ``` ## Disable SSL certificate verification + For ICP (IBM Cloud Private), you can disable the SSL certificate verification by: ```python -service.disable_SSL_verification() +service.set_disable_ssl_verification(True) +``` + +Or you can set it from external sources.
For example, set it in an environment variable: + +``` +export _DISABLE_SSL=True +``` + +## Setting the service url + +To set the base URL to be used when contacting the service: + +```python +service.set_service_url('my_new_service_url') +``` + +Or you can set it from external sources, for example in an environment variable: + +``` +export _URL="" +``` + ## Sending request headers + Custom headers can be passed in any request in the form of a `dict` as: + ```python headers = { 'Custom-Header': 'custom_value' } ``` + For example, to send a header called `Custom-Header` to a call in Watson Assistant, pass the headers parameter as: + ```python -from ibm_watson import AssistantV1 +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator -assistant = AssistantV1( - username='xxx', - password='yyy', - url='', - version='2018-07-10') +authenticator = IAMAuthenticator('your apikey') +assistant = AssistantV2( + version='2024-08-25', + authenticator=authenticator) +assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com') response = assistant.list_workspaces(headers={'Custom-Header': 'custom_value'}).get_result() ``` -## Parsing HTTP response info +## Parsing HTTP response information + If you would like access to some HTTP response information along with the response model, you can call `set_detailed_response(True)`. Since Python SDK `v2.0`, it is set to `True` by default. + ```python -from ibm_watson import AssistantV1 +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator -assistant = AssistantV1( - username='xxx', - password='yyy', - url='', - version='2018-07-10') +authenticator = IAMAuthenticator('your apikey') +assistant = AssistantV2( + version='2024-08-25', + authenticator=authenticator) +assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com') assistant.set_detailed_response(True) response = assistant.list_workspaces(headers={'Custom-Header': 'custom_value'}).get_result() @@ -289,6 +354,7 @@ print(response) ``` This would give an output of `DetailedResponse` having the structure: + ```python { 'result': , @@ -296,9 +362,48 @@ This would give an output of `DetailedResponse` having the structure: 'headers': , 'status_code': } ``` + You can use the `get_result()`, `get_headers()` and `get_status_code()` to return the result, headers and status code respectively. +## Getting the transaction ID + +Every SDK call returns a response with a transaction ID in the `X-Global-Transaction-Id` header. Together with the service instance region, this ID helps support teams troubleshoot issues from relevant logs. + +### Success + +```python +from ibm_watson import AssistantV2 + +service = AssistantV2(authenticator={my_authenticator}) +response_headers = service.my_service_call().get_headers() +print(response_headers.get('X-Global-Transaction-Id')) +``` + +### Failure + +```python +from ibm_watson import AssistantV2, ApiException + +try: + service = AssistantV2(authenticator={my_authenticator}) + service.my_service_call() +except ApiException as e: + print(e.global_transaction_id) + # OR + print(e.http_response.headers.get('X-Global-Transaction-Id')) +``` + +However, the transaction ID isn't available when the API doesn't return a response for some reason. In that case, you can set your own transaction ID in the request. For example, replace `` in the following example with a unique transaction ID.
+ +```python +from ibm_watson import AssistantV2 + +service = AssistantV2(authenticator={my_authenticator}) +service.my_service_call(headers={'X-Global-Transaction-Id': ''}) +``` + ## Using Websockets + The Text to Speech service supports synthesizing text to spoken audio using web sockets with the `synthesize_using_websocket`. The Speech to Text service supports recognizing speech to text using web sockets with the `recognize_using_websocket`. These methods need a custom callback class to listen to events. Below is an example of `synthesize_using_websocket`. Note: The service accepts one request per connection. ```py @@ -322,40 +427,88 @@ service.synthesize_using_websocket('I like to pet dogs', ) ``` -## Dependencies +## Cloud Pak for Data -* [requests] -* `python_dateutil` >= 2.5.3 -* [responses] for testing -* Following for web sockets support in speech to text - * `websocket-client` 0.48.0 -* `ibm_cloud_sdk_core` >=0.2.0 +If your service instance is of CP4D, below are two ways of initializing the assistant service. -## Contributing +### 1) Supplying the username, password and authentication url + +The SDK will manage the token for the user + +```python +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import CloudPakForDataAuthenticator + +authenticator = CloudPakForDataAuthenticator( + '', + '', + '', # should be of the form https://{icp_cluster_host}{instance-id}/api + disable_ssl_verification=True) # Disable ssl verification for authenticator + +assistant = AssistantV2( + version='', + authenticator=authenticator) +assistant.set_service_url('') # should be of the form https://{icp_cluster_host}/{deployment}/assistant/{instance-id}/api +assistant.set_disable_ssl_verification(True) # MAKE SURE SSL VERIFICATION IS DISABLED +``` + +### 2) Supplying the access token + +```python +from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import BearerTokenAuthenticator + +authenticator = BearerTokenAuthenticator('your managed access token') +assistant = AssistantV2(version='', + authenticator=authenticator) +assistant.set_service_url('') # should be of the form https://{icp_cluster_host}/{deployment}/assistant/{instance-id}/api +assistant.set_disable_ssl_verification(True) # MAKE SURE SSL VERIFICATION IS DISABLED +``` + +## Logging -See [CONTRIBUTING.md][CONTRIBUTING]. 
+### Enable logging -## Featured Projects +```python +import logging +logging.basicConfig(level=logging.DEBUG) +``` + +This would show output of the form: + +``` +DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): iam.cloud.ibm.com:443 +DEBUG:urllib3.connectionpool:https://iam.cloud.ibm.com:443 "POST /identity/token HTTP/1.1" 200 1809 +DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): gateway.watsonplatform.net:443 +DEBUG:urllib3.connectionpool:https://gateway.watsonplatform.net:443 "POST /assistant/api/v1/workspaces?version=2018-07-10 HTTP/1.1" 201 None +DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): gateway.watsonplatform.net:443 +DEBUG:urllib3.connectionpool:https://gateway.watsonplatform.net:443 "GET /assistant/api/v1/workspaces/883a2a44-eb5f-4b1a-96b0-32a90b475ea8?version=2018-07-10&export=true HTTP/1.1" 200 None +DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): gateway.watsonplatform.net:443 +DEBUG:urllib3.connectionpool:https://gateway.watsonplatform.net:443 "DELETE /assistant/api/v1/workspaces/883a2a44-eb5f-4b1a-96b0-32a90b475ea8?version=2018-07-10 HTTP/1.1" 200 28 +``` + +### Low level request and response dump -Here are some projects that have been using the SDK: +To get low level information of the requests/ responses: -* [NLC ICD-10 Classifier](https://github.com/IBM/nlc-icd10-classifier) -* [Cognitive Moderator Service](https://github.com/IBM/cognitive-moderator-service) +```python +from http.client import HTTPConnection +HTTPConnection.debuglevel = 1 +``` -We'd love to highlight cool open-source projects that use this SDK! If you'd like to get your project added to the list, feel free to make an issue linking us to it. +## Dependencies +- [requests] +- `python_dateutil` >= 2.5.3 +- [responses] for testing +- Following for web sockets support in speech to text + - `websocket-client` 1.1.0 +- `ibm_cloud_sdk_core` >= 3.16.2 + +## Contributing + +See [CONTRIBUTING.md][contributing]. ## License This library is licensed under the [Apache 2.0 license][license]. - -[wdc]: http://www.ibm.com/watson/developercloud/ -[ibm_cloud]: https://cloud.ibm.com/ -[watson-dashboard]: https://cloud.ibm.com/catalog?category=ai -[responses]: https://github.com/getsentry/responses -[requests]: http://docs.python-requests.org/en/latest/ -[examples]: https://github.com/watson-developer-cloud/python-sdk/tree/master/examples -[CONTRIBUTING]: https://github.com/watson-developer-cloud/python-sdk/blob/master/CONTRIBUTING.md -[license]: http://www.apache.org/licenses/LICENSE-2.0 -[vcap_services]: https://cloud.ibm.com/docs/services/watson?topic=watson-vcapServices -[ibm-cloud-onboarding]: https://cloud.ibm.com/registration?target=/developer/watson&cm_sp=WatsonPlatform-WatsonServices-_-OnPageNavLink-IBMWatson_SDKs-_-Python diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 7d324ea93..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,34 +0,0 @@ -environment: - - matrix: - - - PYTHON: "C:\\Python35" - - PYTHON: "C:\\Python27-x64" - - PYTHON: "C:\\Python36-x64" - -install: - - # Install Python (from the official .msi of https://python.org) and pip when - # not already installed. - - ps: if (-not(Test-Path($env:PYTHON))) { & appveyor\install.ps1 } - - # Prepend newly installed Python to the PATH of this build - - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" - - - "python -m pip install --upgrade pip" - - - "pip install --editable ." 
- - - "pip install -r requirements-dev.txt" - -build: off - -test_script: - - - ps: py.test --reruns 3 --cov=ibm_watson - -deploy: off - -matrix: - fast_finish: true - diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 000000000..ca2dcc4ed --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,5 @@ +#Import Python Base Image(change version as needed) +FROM python:3-slim + +#Install the python SDK +RUN pip install --upgrade ibm-watson>=3.2.0 diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000..d480f47fd --- /dev/null +++ b/docker/README.md @@ -0,0 +1,19 @@ +## Docker +You can use docker to test issues you have with the SDK. + +1. Install docker + - Mac: https://docs.docker.com/docker-for-mac/install/ + - Windows: https://docs.docker.com/docker-for-windows/install/ +1. Download the dockerfile for this SDK and edit as needed. + - Change the python version as needed `FROM python:` + - For valid python base images on docker see https://hub.docker.com/_/python + - Copy code/file that you wish to test into the dockerfile + - Add line `COPY ... ` + - Set dockerfile to execute code file + - Add line `CMD [ "" ]` + + - For more information on dockerfile construction please visit https://docs.docker.com/engine/reference/builder/ +1. Build and run the docker image. + - Navigate to docker file directory + - To build the docker image run `docker build --tag= .` + - To run the docker image run `docker run ` diff --git a/docs/publish_gha.sh b/docs/publish_gha.sh new file mode 100755 index 000000000..ff1ff9e98 --- /dev/null +++ b/docs/publish_gha.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# checking the build/job numbers allows it to only +# publish once even though we test against multiple python versions + +[[ -z "$GHA_BRANCH" ]] && { echo "GHA_BRANCH cannot be null" ; exit 1; } +[[ -z "$GH_TOKEN" ]] && { echo "GH_TOKEN cannot be null" ; exit 1; } + +cd $(dirname $0) +pwd + +echo "Create Docs" +make document +echo "Publishing Docs..." + +git config --global user.email "watdevex@us.ibm.com" +git config --global user.name "watdevex" +git clone --quiet --branch=gh-pages https://${GH_TOKEN}@github.com/watson-developer-cloud/python-sdk.git gh-pages > /dev/null + +pushd gh-pages + # on tagged builds, $GHA_BRANCH is the tag (e.g. v1.2.3), otherwise it's the branch name (e.g. master) + rm -rf $GHA_BRANCH + cp -Rf ../_build/html/ $GHA_BRANCH + ../generate_index_html.sh > index.html + + git add -f . + git commit -m "Docs for $GHA_BRANCH ($GHA_COMMIT)" + git push -fq origin gh-pages > /dev/null +popd + +echo -e "Published Docs for $GHA_BRANCH to gh-pages.\n" + diff --git a/examples/__init__.py b/examples/__init__.py index f5b8b65a1..d932b9f38 100644 --- a/examples/__init__.py +++ b/examples/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2016 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2015, 2020. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/examples/assistant_tone_analyzer_integration/.env.example b/examples/assistant_tone_analyzer_integration/.env.example deleted file mode 100644 index 416fe383b..000000000 --- a/examples/assistant_tone_analyzer_integration/.env.example +++ /dev/null @@ -1,6 +0,0 @@ -# see README.md for details - -ASSISTANT_APIKEY= -WORKSPACE_ID= - -TONE_ANALYZER_APIKEY= diff --git a/examples/assistant_tone_analyzer_integration/README.md b/examples/assistant_tone_analyzer_integration/README.md deleted file mode 100644 index f75c1c7c7..000000000 --- a/examples/assistant_tone_analyzer_integration/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Assistant and Tone Analyzer Integration Example - -This example provides sample code for integrating [Tone Analyzer][tone_analyzer] and [Assistant][assistant] in Python 2.6+. All calls are made synchronously. For sample Python 3.5 asynchronous code, please see [https://github.com/aprilwebster/python-sdk][aprilwebster_python_sdk_github]. - - * [tone_detection.py][tone_assistant_integration_example_tone_detection] - sample code to initialize a user object in the assistant payload's context (initUser), to call Tone Analyzer to retrieve tone for a user's input (invokeToneAsync), and to update tone in the user object in the assistant payload's context (updateUserTone). - - * [tone_assistant_integration.v1.py][tone_assistant_integration_example] - sample code to use tone_detection.py to get and add tone to the payload and send a request to the Assistant Service's message endpoint both in a synchronous and asynchronous manner. - - -Requirements to run the sample code - - * [Tone Analyzer Service credentials][ibm_cloud_tone_analyzer_service] - * [Assistant Service credentials][ibm_cloud_assistant_service] - * [Assistant Workspace ID][assistant_simple_workspace] - -Credentials & the Workspace ID can be set in environment properties, a .env file, or directly in the code. - -Dependencies provided in -`init.py` - -Command to run the sample code - -`python tone_assistant_integration.v1.py` - -[assistant]: https://cloud.ibm.com/apidocs/assistant -[tone_analyzer]: https://cloud.ibm.com/apidocs/tone-analyzer -[ibm_cloud_assistant_service]: https://cloud.ibm.com/catalog/services/watson-assistant -[ibm_cloud_tone_analyzer_service]: https://cloud.ibm.com/catalog/services/tone-analyzer -[assistant_simple_workspace]: https://github.com/watson-developer-cloud/conversation-simple#workspace -[tone_assistant_integration_example]: https://github.com/watson-developer-cloud/python-sdk/tree/master/examples/assistant_tone_analyzer_integration/tone_assistant_integration.v1.py -[tone_assistant_integration_example_tone_detection]: https://github.com/watson-developer-cloud/python-sdk/tree/master/examples/assistant_tone_analyzer_integration/tone_detection.py -[aprilwebster_python_sdk_github]: https://github.com/aprilwebster/python-sdk \ No newline at end of file diff --git a/examples/assistant_tone_analyzer_integration/__init__.py b/examples/assistant_tone_analyzer_integration/__init__.py deleted file mode 100644 index 4cdaa2645..000000000 --- a/examples/assistant_tone_analyzer_integration/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2016 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .ibm_watson import WatsonService -from .ibm_watson import WatsonException -from .assistant import AssistantV1 -from .tone_analyzer_v3 import ToneAnalyzerV3 - - -from .version import __version__ diff --git a/examples/assistant_tone_analyzer_integration/tone_assistant_integration.v1.py b/examples/assistant_tone_analyzer_integration/tone_assistant_integration.v1.py deleted file mode 100644 index b708da4e1..000000000 --- a/examples/assistant_tone_analyzer_integration/tone_assistant_integration.v1.py +++ /dev/null @@ -1,69 +0,0 @@ -from __future__ import print_function -import json -import os -from dotenv import load_dotenv, find_dotenv - -from ibm_watson import AssistantV1 -from ibm_watson import ToneAnalyzerV3 - -# import tone detection -import tone_detection - -# load the .env file containing your environment variables for the required -# services (conversation and tone) -load_dotenv(find_dotenv()) - -# replace with your own assistant credentials or put them in a .env file -assistant = AssistantV1( - iam_apikey=os.environ.get('ASSISTANT_APIKEY') or 'YOUR ASSISTANT APIKEY', - version='2018-07-10') - -# replace with your own tone analyzer credentials -tone_analyzer = ToneAnalyzerV3( - iam_apikey=os.environ.get('TONE_ANALYZER_APIKEY') or 'YOUR TONE ANALYZER APIKEY', - version='2016-05-19') - -# replace with your own workspace_id -workspace_id = os.environ.get('WORKSPACE_ID') or 'YOUR WORKSPACE ID' - -# This example stores tone for each user utterance in conversation context. -# Change this to false, if you do not want to maintain history -global_maintainToneHistoryInContext = True - -# Payload for the Watson Conversation Service -# user input text required - replace "I am happy" with user input text. -global_payload = { - 'workspace_id': workspace_id, - 'input': { - 'text': "I am happy" - } -} - - -def invokeToneConversation(payload, maintainToneHistoryInContext): - """ - invokeToneConversation calls the Tone Analyzer service to get the - tone information for the user's input text (input['text'] in the payload - json object), adds/updates the user's tone in the payload's context, - and sends the payload to the - conversation service to get a response which is printed to screen. - :param payload: a json object containing the basic information needed to - converse with the Conversation Service's message endpoint. - :param maintainHistoryInContext: - - - Note: as indicated below, the console.log statements can be replaced - with application-specific code to process the err or data object - returned by the Conversation Service. 
- """ - tone = tone_analyzer.tone(tone_input=payload['input'], content_type='application/json').get_result() - conversation_payload = tone_detection.\ - updateUserTone(payload, tone, maintainToneHistoryInContext) - response = assistant.message(workspace_id=workspace_id, - input=conversation_payload['input'], - context=conversation_payload['context']).get_result() - print(json.dumps(response, indent=2)) - - -# synchronous call to conversation with tone included in the context -invokeToneConversation(global_payload, global_maintainToneHistoryInContext) diff --git a/examples/assistant_tone_analyzer_integration/tone_detection.py b/examples/assistant_tone_analyzer_integration/tone_detection.py deleted file mode 100644 index d7b331e6e..000000000 --- a/examples/assistant_tone_analyzer_integration/tone_detection.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2016 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" - * Thresholds for identifying meaningful tones returned by the Watson Tone - Analyzer. Current values are - * based on the recommendations made by the Watson Tone Analyzer at - * https://cloud.ibm.com/docs/services/tone-analyzer?topic=tone-analyzer-utgpe - * These thresholds can be adjusted to client/domain requirements. -""" - -PRIMARY_EMOTION_SCORE_THRESHOLD = 0.5 -WRITING_HIGH_SCORE_THRESHOLD = 0.75 -WRITING_NO_SCORE_THRESHOLD = 0.0 -SOCIAL_HIGH_SCORE_THRESHOLD = 0.75 -SOCIAL_LOW_SCORE_THRESHOLD = 0.25 - -# Labels for the tone categories returned by the Watson Tone Analyzer -EMOTION_TONE_LABEL = 'emotion_tone' -LANGUAGE_TONE_LABEL = 'language_tone' -SOCIAL_TONE_LABEL = 'social_tone' - - -def updateUserTone(conversationPayload, toneAnalyzerPayload, maintainHistory): - """ - updateUserTone processes the Tone Analyzer payload to pull out the emotion, - writing and social tones, and identify the meaningful tones (i.e., - those tones that meet the specified thresholds). - The conversationPayload json object is updated to include these tones. 
- @param conversationPayload json object returned by the Watson Conversation - Service - @param toneAnalyzerPayload json object returned by the Watson Tone Analyzer - Service - @returns conversationPayload where the user object has been updated with tone - information from the toneAnalyzerPayload - """ - emotionTone = None - writingTone = None - socialTone = None - - # if there is no context in a - if 'context' not in conversationPayload: - conversationPayload['context'] = {} - - if 'user' not in conversationPayload['context']: - conversationPayload['context'] = initUser() - - # For convenience sake, define a variable for the user object - user = conversationPayload['context']['user'] - - # Extract the tones - emotion, writing and social - if toneAnalyzerPayload and toneAnalyzerPayload['document_tone']: - for toneCategory in toneAnalyzerPayload['document_tone']['tone_categories']: - if toneCategory['category_id'] == EMOTION_TONE_LABEL: - emotionTone = toneCategory - if toneCategory['category_id'] == LANGUAGE_TONE_LABEL: - writingTone = toneCategory - if toneCategory['category_id'] == SOCIAL_TONE_LABEL: - socialTone = toneCategory - - updateEmotionTone(user, emotionTone, maintainHistory) - updateWritingTone(user, writingTone, maintainHistory) - updateSocialTone(user, socialTone, maintainHistory) - - conversationPayload['context']['user'] = user - - return conversationPayload - - -def initUser(): - """ - initUser initializes a user object containing tone data (from the - Watson Tone Analyzer) - @returns user json object with the emotion, writing and social tones. The - current tone identifies the tone for a specific conversation turn, and the - history provides the conversation for all tones up to the current tone for a - conversation instance with a user. 
- """ - return { - 'user': { - 'tone': { - 'emotion': { - 'current': None - }, - 'writing': { - 'current': None - }, - 'social': { - 'current': None - } - } - } - } - - - - -def updateEmotionTone(user, emotionTone, maintainHistory): - """ - updateEmotionTone updates the user emotion tone with the primary emotion - - the emotion tone that has a score greater than or equal to the - EMOTION_SCORE_THRESHOLD; otherwise primary emotion will be 'neutral' - @param user a json object representing user information (tone) to be used in - conversing with the Conversation Service - @param emotionTone a json object containing the emotion tones in the payload - returned by the Tone Analyzer - """ - maxScore = 0.0 - primaryEmotion = None - primaryEmotionScore = None - - for tone in emotionTone['tones']: - if tone['score'] > maxScore: - maxScore = tone['score'] - primaryEmotion = tone['tone_name'].lower() - primaryEmotionScore = tone['score'] - - if maxScore <= PRIMARY_EMOTION_SCORE_THRESHOLD: - primaryEmotion = 'neutral' - primaryEmotionScore = None - - # update user emotion tone - user['tone']['emotion']['current'] = primaryEmotion - - if maintainHistory: - if 'history' not in user['tone']['emotion']: - user['tone']['emotion']['history'] = [] - user['tone']['emotion']['history'].append({ - 'tone_name': primaryEmotion, - 'score': primaryEmotionScore - }) - - -def updateWritingTone(user, writingTone, maintainHistory): - """ - updateWritingTone updates the user with the writing tones interpreted based - on the specified thresholds - @param: user a json object representing user information (tone) to be used - in conversing with the Conversation Service - @param: writingTone a json object containing the writing tones in the - payload returned by the Tone Analyzer - """ - currentWriting = [] - currentWritingObject = [] - - # Process each writing tone and determine if it is high or low - for tone in writingTone['tones']: - if tone['score'] >= WRITING_HIGH_SCORE_THRESHOLD: - currentWriting.append(tone['tone_name'].lower() + '_high') - currentWritingObject.append({ - 'tone_name': tone['tone_name'].lower(), - 'score': tone['score'], - 'interpretation': 'likely high' - }) - elif tone['score'] <= WRITING_NO_SCORE_THRESHOLD: - currentWritingObject.append({ - 'tone_name': tone['tone_name'].lower(), - 'score': tone['score'], - 'interpretation': 'no evidence' - }) - else: - currentWritingObject.append({ - 'tone_name': tone['tone_name'].lower(), - 'score': tone['score'], - 'interpretation': 'likely medium' - }) - - # update user writing tone - user['tone']['writing']['current'] = currentWriting - if maintainHistory: - if 'history' not in user['tone']['writing']: - user['tone']['writing']['history'] = [] - user['tone']['writing']['history'].append(currentWritingObject) - - -def updateSocialTone(user, socialTone, maintainHistory): - """ - updateSocialTone updates the user with the social tones interpreted based on - the specified thresholds - @param user a json object representing user information (tone) to be used in - conversing with the Conversation Service - @param socialTone a json object containing the social tones in the payload - returned by the Tone Analyzer - """ - currentSocial = [] - currentSocialObject = [] - - # Process each social tone and determine if it is high or low - for tone in socialTone['tones']: - if tone['score'] >= SOCIAL_HIGH_SCORE_THRESHOLD: - currentSocial.append(tone['tone_name'].lower() + '_high') - currentSocialObject.append({ - 'tone_name': tone['tone_name'].lower(), - 'score': 
tone['score'], - 'interpretation': 'likely high' - }) - elif tone['score'] <= SOCIAL_LOW_SCORE_THRESHOLD: - currentSocial.append(tone['tone_name'].lower() + '_low') - currentSocialObject.append({ - 'tone_name': tone['tone_name'].lower(), - 'score': tone['score'], - 'interpretation': 'likely low' - }) - else: - currentSocialObject.append({ - 'tone_name': tone['tone_name'].lower(), - 'score': tone['score'], - 'interpretation': 'likely medium' - }) - - # update user social tone - user['tone']['social']['current'] = currentSocial - if maintainHistory: - if not user['tone']['social']['current']: - user['tone']['social']['current'] = [] - user['tone']['social']['current'].append(currentSocialObject) diff --git a/examples/assistant_v1.py b/examples/assistant_v1.py index 6bc766ae8..2316e2d38 100644 --- a/examples/assistant_v1.py +++ b/examples/assistant_v1.py @@ -1,20 +1,19 @@ -from __future__ import print_function import json from ibm_watson import AssistantV1 +# from ibm_cloud_sdk_core.authenticators import IAMAuthenticator -# If service instance provides API key authentication -assistant = AssistantV1( - version='2018-07-10', - ## url is optional, and defaults to the URL below. Use the correct URL for your region. - url='https://gateway.watsonplatform.net/assistant/api', - iam_apikey='YOUR APIKEY') +# Authentication via IAM +# authenticator = IAMAuthenticator('your apikey') # assistant = AssistantV1( -# username='YOUR SERVICE USERNAME', -# password='YOUR SERVICE PASSWORD', -# ## url is optional, and defaults to the URL below. Use the correct URL for your region. -# # url='https://gateway.watsonplatform.net/assistant/api', -# version='2018-07-10') +# version='2018-07-10', +# authenticator=authenticator) +# assistant.set_service_url('https://gateway.watsonplatform.net/assistant/api') + + +# Authentication via external config like VCAP_SERVICES +assistant = AssistantV1(version='2018-07-10') +assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com') ######################### # Workspaces @@ -196,18 +195,18 @@ 'values': [{ 'value': 'value0', 'patterns': ['\\d{6}\\w{1}\\d{7}'], - 'value_type': 'patterns' + 'type': 'patterns' }, { 'value': 'value1', 'patterns': ['[-9][0-9][0-9][0-9][0-9]~! 
[1-9][1-9][1-9][1-9][1-9][1-9]'], - 'value_type': + 'type': 'patterns' }, { 'value': 'value2', 'patterns': ['[a-z-9]{17}'], - 'value_type': 'patterns' + 'type': 'patterns' }, { 'value': 'value3', @@ -215,12 +214,12 @@ '\\d{3}(\\ |-)\\d{3}(\\ |-)\\d{4}', '\\(\\d{3}\\)(\\ |-)\\d{3}(\\ |-)\\d{4}' ], - 'value_type': + 'type': 'patterns' }, { 'value': 'value4', 'patterns': ['\\b\\d{5}\\b'], - 'value_type': 'patterns' + 'type': 'patterns' }] }] response = assistant.create_entity( @@ -266,7 +265,7 @@ print(json.dumps(response, indent=2)) response = assistant.update_synonym(workspace_id, 'beverage', 'orange juice', - 'oj', 'OJ').get_result() + 'oj', new_synonym='OJ').get_result() print(json.dumps(response, indent=2)) response = assistant.delete_synonym(workspace_id, 'beverage', 'orange juice', @@ -292,7 +291,7 @@ print(json.dumps(response, indent=2)) response = assistant.update_value(workspace_id, 'test_entity', 'test', - 'example').get_result() + new_value='example').get_result() print(json.dumps(response, indent=2)) response = assistant.delete_value(workspace_id, 'test_entity', @@ -320,7 +319,7 @@ response = assistant.create_dialog_node( workspace_id, create_dialog_node['dialog_node'], - create_dialog_node['description'], + description=create_dialog_node['description'], actions=create_dialog_node['actions']).get_result() print(json.dumps(response, indent=2)) diff --git a/examples/assistant_v2.py b/examples/assistant_v2.py index 90b9a2c43..af28a1c69 100644 --- a/examples/assistant_v2.py +++ b/examples/assistant_v2.py @@ -1,20 +1,12 @@ -from __future__ import print_function import json from ibm_watson import AssistantV2 +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator -# If service instance provides API key authentication +authenticator = IAMAuthenticator('your apikey') assistant = AssistantV2( version='2018-09-20', - ## url is optional, and defaults to the URL below. Use the correct URL for your region. - url='https://gateway.watsonplatform.net/assistant/api', - iam_apikey='YOUR APIKEY') - -# assistant = AssistantV2( -# username='YOUR SERVICE USERNAME', -# password='YOUR SERVICE PASSWORD', -# ## url is optional, and defaults to the URL below. Use the correct URL for your region. -# url='https://gateway.watsonplatform.net/assistant/api', -# version='2018-09-20') + authenticator=authenticator) +assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com') ######################### # Sessions @@ -39,3 +31,8 @@ } }).get_result() print(json.dumps(message, indent=2)) + +# logs = assistant.list_logs( +# "" +# ) +# print(json.dumps(logs, indent=2)) diff --git a/examples/authorization_v1.py b/examples/authorization_v1.py deleted file mode 100644 index 8cef2af3c..000000000 --- a/examples/authorization_v1.py +++ /dev/null @@ -1,10 +0,0 @@ -import json -from ibm_watson import AuthorizationV1 -from ibm_watson import SpeechToTextV1 - -authorization = AuthorizationV1( - username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') - -print( - json.dumps( - authorization.get_token(url=SpeechToTextV1.default_url), indent=2)) diff --git a/examples/discovery_v1.py b/examples/discovery_v1.py deleted file mode 100644 index 0d8d00893..000000000 --- a/examples/discovery_v1.py +++ /dev/null @@ -1,74 +0,0 @@ -# coding: utf-8 -from __future__ import print_function -import json -from ibm_watson import DiscoveryV1 - -# If service instance provides API key authentication -discovery = DiscoveryV1( - version='2018-08-01', - ## url is optional, and defaults to the URL below. 
Use the correct URL for your region. - url='https://gateway.watsonplatform.net/discovery/api', - iam_apikey='YOUR APIKEY') - -# discovery = DiscoveryV1( -# version='2018-08-01', -# ## url is optional, and defaults to the URL below. Use the correct URL for your region. -# # url='https://gateway.watsonplatform.net/discovery/api', -# username='YOUR SERVICE USERNAME', -# password='YOUR SERVICE PASSWORD') - -environments = discovery.list_environments().get_result() -print(json.dumps(environments, indent=2)) - -news_environment_id = 'system' -print(json.dumps(news_environment_id, indent=2)) - -collections = discovery.list_collections(news_environment_id).get_result() -news_collections = [x for x in collections['collections']] -print(json.dumps(collections, indent=2)) - -configurations = discovery.list_configurations( - environment_id=news_environment_id).get_result() -print(json.dumps(configurations, indent=2)) - -query_results = discovery.query( - news_environment_id, - news_collections[0]['collection_id'], - filter='extracted_metadata.sha1::f5*', - return_fields='extracted_metadata.sha1').get_result() -print(json.dumps(query_results, indent=2)) - -# new_environment = discovery.create_environment(name="new env", description="bogus env").get_result() -# print(new_environment) - -# environment = discovery.get_environment(environment_id=new_environment['environment_id']).get_result() -# if environment['status'] == 'active': -# writable_environment_id = new_environment['environment_id'] -# new_collection = discovery.create_collection(environment_id=writable_environment_id, -# name='Example Collection', -# description="just a test").get_result() - -# print(new_collection) - -# collections = discovery.list_collections(environment_id=writable_environment_id).get_result() -# print(collections) - -# res = discovery.delete_collection(environment_id='', -# collection_id=new_collection['collection_id']).get_result() -# print(res) - -# collections = discovery.list_collections(environment_id=writable_environment_id).get_result() -# print(collections) - -# with open(os.path.join(os.getcwd(), '..','resources', 'simple.html')) as fileinfo: -# res = discovery.add_document(environment_id=writable_environment_id, -# collection_id=collections['collections'][0]['collection_id'], -# file=fileinfo).get_result() -# print(res) - -# res = discovery.get_collection(environment_id=writable_environment_id, -# collection_id=collections['collections'][0]['collection_id']).get_result() -# print(res['document_counts']) - -#res = discovery.delete_environment(environment_id=writable_environment_id).get_result() -#print(res) diff --git a/examples/discovery_v2.py b/examples/discovery_v2.py new file mode 100644 index 000000000..07bbfdd59 --- /dev/null +++ b/examples/discovery_v2.py @@ -0,0 +1,84 @@ +import json +import os +from ibm_watson import DiscoveryV2 +from ibm_watson.discovery_v2 import TrainingExample +from ibm_cloud_sdk_core.authenticators import CloudPakForDataAuthenticator, BearerTokenAuthenticator + +## Important: Discovery v2 is only available on Cloud Pak for Data. 
## + +## Authentication ## +## Option 1: username/password +authenticator = CloudPakForDataAuthenticator('', + '', + '', + disable_ssl_verification=True) + +## Option 2: bearer token +authenticator = BearerTokenAuthenticator('your bearer token') + +## Initialize discovery instance ## +discovery = DiscoveryV2(version='2019-11-22', authenticator=authenticator) +discovery.set_service_url( + '' +) +discovery.set_disable_ssl_verification(True) + +PROJECT_ID = 'your project id' +## List Collections ## +collections = discovery.list_collections(project_id=PROJECT_ID).get_result() +print(json.dumps(collections, indent=2)) + +## Component settings ## +settings_result = discovery.get_component_settings( + project_id=PROJECT_ID).get_result() +print(json.dumps(settings_result, indent=2)) + +## Add Document ## +COLLECTION_ID = 'your collection id' +with open(os.path.join(os.getcwd(), '..', 'resources', + 'simple.html')) as fileinfo: + add_document_result = discovery.add_document(project_id=PROJECT_ID, + collection_id=COLLECTION_ID, + file=fileinfo).get_result() +print(json.dumps(add_document_result, indent=2)) +document_id = add_document_result.get('document_id') + +## Create Training Data ## +training_example = TrainingExample(document_id=document_id, + collection_id=COLLECTION_ID, + relevance=1) +create_query = discovery.create_training_query( + project_id=PROJECT_ID, + natural_language_query='How is the weather today?', + examples=[training_example]).get_result() +print(json.dumps(create_query, indent=2)) + +training_queries = discovery.list_training_queries( + project_id=PROJECT_ID).get_result() +print(json.dumps(training_queries, indent=2)) + +## Queries ## +query_result = discovery.query( + project_id=PROJECT_ID, + collection_ids=[COLLECTION_ID], + natural_language_query='How is the weather today?').get_result() +print(json.dumps(query_result, indent=2)) + +autocomplete_result = discovery.get_autocompletion( + project_id=PROJECT_ID, prefix="The content").get_result() +print(json.dumps(autocomplete_result, indent=2)) + +query_notices_result = discovery.query_notices( + project_id=PROJECT_ID, natural_language_query='warning').get_result() +print(json.dumps(query_notices_result, indent=2)) + +list_fields = discovery.list_fields(project_id=PROJECT_ID).get_result() +print(json.dumps(list_fields, indent=2)) + +## Cleanup ## +discovery.delete_training_queries(project_id=PROJECT_ID).get_result() + +delete_document_result = discovery.delete_document( + project_id=PROJECT_ID, collection_id=COLLECTION_ID, + document_id=document_id).get_result() +print(json.dumps(delete_document_result, indent=2)) diff --git a/examples/language_translator_v3.py b/examples/language_translator_v3.py deleted file mode 100644 index 4262b7e48..000000000 --- a/examples/language_translator_v3.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding=utf-8 -from __future__ import print_function -import json -from ibm_watson import LanguageTranslatorV3 - -language_translator = LanguageTranslatorV3( - version='2018-05-01.', - ### url is optional, and defaults to the URL below. Use the correct URL for your region. 
- # url='https://gateway.watsonplatform.net/language-translator/api', - iam_apikey='YOUR APIKEY') - -# Authenticate with username/password if your service instance doesn't provide an API key -# language_translator = LanguageTranslatorV3( -# version='2018-05-01', -# username='YOUR SERVICE USERNAME', -# password='YOUR SERVICE PASSWORD') - -## Translate -translation = language_translator.translate( - text='Hello', model_id='en-es').get_result() -print(json.dumps(translation, indent=2, ensure_ascii=False)) - -# List identifiable languages -# languages = language_translator.list_identifiable_languages().get_result() -# print(json.dumps(languages, indent=2)) - -# # Identify -# language = language_translator.identify( -# 'Language translator translates text from one language to another').get_result() -# print(json.dumps(language, indent=2)) - -# # List models -# models = language_translator.list_models( -# source='en').get_result() -# print(json.dumps(models, indent=2)) - -# # Create model -# with open('glossary.tmx', 'rb') as glossary: -# response = language_translator.create_model( -# base_model_id='en-es', -# name='custom-english-to-spanish', -# forced_glossary=glossary).get_result() -# print(json.dumps(response, indent=2)) - -# # Delete model -# response = language_translator.delete_model(model_id='').get_result() -# print(json.dumps(response, indent=2)) - -# # Get model details -# model = language_translator.get_model(model_id='').get_result() -# print(json.dumps(model, indent=2)) diff --git a/examples/microphone-speech-to-text.py b/examples/microphone-speech-to-text.py index c56d6e4ad..9174de74f 100644 --- a/examples/microphone-speech-to-text.py +++ b/examples/microphone-speech-to-text.py @@ -6,11 +6,11 @@ # recordings to the queue, and the websocket client would be sending the # recordings to the speech to text service -from __future__ import print_function import pyaudio from ibm_watson import SpeechToTextV1 from ibm_watson.websocket import RecognizeCallback, AudioSource from threading import Thread +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator try: from Queue import Queue, Full @@ -35,9 +35,8 @@ ############################################### # initialize speech to text service -speech_to_text = SpeechToTextV1( - iam_apikey='{YOUR_IAM_API_KEY}', - url='{YOUR_GATEWAY_URL}') +authenticator = IAMAuthenticator('your_api_key') +speech_to_text = SpeechToTextV1(authenticator=authenticator) # define callback for the speech to text service class MyRecognizeCallback(RecognizeCallback): @@ -122,7 +121,7 @@ def pyaudio_callback(in_data, frame_count, time_info, status): pass except KeyboardInterrupt: # stop recording - audio_source.completed_recording() stream.stop_stream() stream.close() audio.terminate() + audio_source.completed_recording() diff --git a/examples/natural_language_classifier_v1.py b/examples/natural_language_classifier_v1.py deleted file mode 100644 index 64183279d..000000000 --- a/examples/natural_language_classifier_v1.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import print_function -import json -import os - -# from os.path import join, dirname -from ibm_watson import NaturalLanguageClassifierV1 - -# If service instance provides API key authentication -service = NaturalLanguageClassifierV1( - ## url is optional, and defaults to the URL below. Use the correct URL for your region. 
- url='https://gateway.watsonplatform.net/natural-language-classifier/api', - iam_apikey='YOUR APIKEY') - -# service = NaturalLanguageClassifierV1( -# ## url is optional, and defaults to the URL below. Use the correct URL for your region. -# # url='https://gateway.watsonplatform.net/natural-language-classifier/api', -# username='YOUR SERVICE USERNAME', -# password='YOUR SERVICE PASSWORD') - -classifiers = service.list_classifiers().get_result() -print(json.dumps(classifiers, indent=2)) - -# create a classifier -with open( - os.path.join( - os.path.dirname(__file__), '../resources/weather_data_train.csv'), - 'rb') as training_data: - metadata = json.dumps({'name': 'my-classifier', 'language': 'en'}) - classifier = service.create_classifier( - metadata=metadata, training_data=training_data).get_result() - classifier_id = classifier['classifier_id'] - print(json.dumps(classifier, indent=2)) - -status = service.get_classifier(classifier_id).get_result() -print(json.dumps(status, indent=2)) - -if status['status'] == 'Available': - classes = service.classify(classifier_id, 'How hot will it be ' - 'tomorrow?').get_result() - print(json.dumps(classes, indent=2)) - -if status['status'] == 'Available': - collection = [ - '{"text":"How hot will it be today?"}', '{"text":"Is it hot outside?"}' - ] - classes = service.classify_collection(classifier_id, - collection).get_result() - print(json.dumps(classes, indent=2)) - -delete = service.delete_classifier(classifier_id).get_result() -print(json.dumps(delete, indent=2)) - -# example of raising a ValueError -# print(json.dumps( -# service.create_classifier(training_data='', name='weather3', metadata='metadata'), -# indent=2)) diff --git a/examples/natural_language_understanding_v1.py b/examples/natural_language_understanding_v1.py index 45d1ab768..570e74821 100644 --- a/examples/natural_language_understanding_v1.py +++ b/examples/natural_language_understanding_v1.py @@ -1,21 +1,19 @@ -from __future__ import print_function import json from ibm_watson import NaturalLanguageUnderstandingV1 from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions +# from ibm_cloud_sdk_core.authenticators import IAMAuthenticator -# If service instance provides API key authentication -service = NaturalLanguageUnderstandingV1( - version='2018-03-16', - ## url is optional, and defaults to the URL below. Use the correct URL for your region. - url='https://gateway.watsonplatform.net/natural-language-understanding/api', - iam_apikey='YOUR APIKEY') - +# Authentication via IAM +# authenticator = IAMAuthenticator('your_api_key') # service = NaturalLanguageUnderstandingV1( # version='2018-03-16', -# ## url is optional, and defaults to the URL below. Use the correct URL for your region. -# # url='https://gateway.watsonplatform.net/natural-language-understanding/api', -# username='YOUR SERVICE USERNAME', -# password='YOUR SERVICE PASSWORD') +# authenticator=authenticator) +# service.set_service_url('https://gateway.watsonplatform.net/natural-language-understanding/api') + +# Authentication via external config like VCAP_SERVICES +service = NaturalLanguageUnderstandingV1( + version='2018-03-16') +service.set_service_url('https://api.us-south.natural-language-understanding.watson.cloud.ibm.com') response = service.analyze( text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! 
' diff --git a/examples/notebooks/assistant_v1.ipynb b/examples/notebooks/assistant_v1.ipynb index 8497e6787..54a1b22e7 100644 --- a/examples/notebooks/assistant_v1.ipynb +++ b/examples/notebooks/assistant_v1.ipynb @@ -10,20 +10,25 @@ { "cell_type": "code", "execution_count": 5, - "metadata": {}, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "import json\n", "import sys\n", "import os\n", "sys.path.append(os.path.join(os.getcwd(),'..','..'))\n", - "import ibm_watson" + "import ibm_watson\n", + "from ibm_cloud_sdk_core.authenticators import IAMAuthenticator" ] }, { "cell_type": "code", "execution_count": 6, - "metadata": {}, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "API_KEY = os.environ.get('ASSISTANT_APIKEY','')" @@ -32,11 +37,13 @@ { "cell_type": "code", "execution_count": 7, - "metadata": {}, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ - "assistant = ibm_watson.AssistantV1(iam_apikey=API_KEY,\n", - " version='2018-07-10')" + "authenticator = IAMAuthenticator(API_KEY)\n", + "assistant = ibm_watson.AssistantV1(version='2018-07-10', authenticator=authenticator)" ] }, { @@ -1026,7 +1033,9 @@ { "cell_type": "code", "execution_count": 37, - "metadata": {}, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "values = [{\"value\": \"juice\"}]" @@ -1767,28 +1776,30 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 2", + "display_name": "Python 3", "language": "python", - "name": "python2" + "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", - "version": 2 + "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.10" + "pygments_lexer": "ipython3", + "version": "3.6.3" } }, "nbformat": 4, diff --git a/examples/notebooks/natural_language_understanding_v1.ipynb b/examples/notebooks/natural_language_understanding_v1.ipynb index 5a4ce07ac..98d4445fa 100644 --- a/examples/notebooks/natural_language_understanding_v1.ipynb +++ b/examples/notebooks/natural_language_understanding_v1.ipynb @@ -3,24 +3,29 @@ { "cell_type": "code", "execution_count": 1, - "metadata": {}, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.join(os.getcwd(),'..'))\n", "import ibm_watson\n", - "from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions" + "from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions\n", + "from ibm_cloud_sdk_core.authenticators import IAMAuthenticator" ] }, { "cell_type": "code", "execution_count": 2, - "metadata": {}, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ - "nlu = ibm_watson.NaturalLanguageUnderstandingV1(version='2018-03-16',\n", - " iam_apikey='YOUR API KEY')" + "authenticator = IAMAuthenticator('YOUR API KEY')\n", + "nlu = ibm_watson.NaturalLanguageUnderstandingV1(version='2018-03-16', authenticator=authenticator)" ] }, { @@ -75,28 +80,30 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python with Pixiedust (Spark 2.2)", + "display_name": "Python 3", "language": "python", - "name": 
"pythonwithpixiedustspark22" + "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", - "version": 2 + "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.10" + "pygments_lexer": "ipython3", + "version": "3.6.3" } }, "nbformat": 4, diff --git a/examples/personality_insights_v3.py b/examples/personality_insights_v3.py deleted file mode 100755 index d10d6ae47..000000000 --- a/examples/personality_insights_v3.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -The example returns a JSON response whose content is the same as that in - ../resources/personality-v3-expect2.txt -""" -from __future__ import print_function -import json -from os.path import join, dirname -from ibm_watson import PersonalityInsightsV3 -import csv - -# If service instance provides API key authentication -service = PersonalityInsightsV3( - version='2017-10-13', - ## url is optional, and defaults to the URL below. Use the correct URL for your region. - url='https://gateway.watsonplatform.net/personality-insights/api', - iam_apikey='YOUR APIKEY') - -# service = PersonalityInsightsV3( -# version='2017-10-13', -# ## url is optional, and defaults to the URL below. Use the correct URL for your region. -# # url='https://gateway.watsonplatform.net/personality-insights/api', -# username='YOUR SERVICE USERNAME', -# password='YOUR SERVICE PASSWORD') - -############################ -# Profile with JSON output # -############################ - -with open(join(dirname(__file__), '../resources/personality-v3.json')) as \ - profile_json: - profile = service.profile( - profile_json.read(), - 'application/json', - raw_scores=True, - consumption_preferences=True).get_result() - - print(json.dumps(profile, indent=2)) - -########################### -# Profile with CSV output # -########################### - -with open(join(dirname(__file__), '../resources/personality-v3.json')) as \ - profile_json: - response = service.profile( - profile_json.read(), - accept='text/csv', - csv_headers=True).get_result() - -profile = response.content -cr = csv.reader(profile.splitlines()) -my_list = list(cr) -for row in my_list: - print(row) diff --git a/examples/speaker_text_to_speech.py b/examples/speaker_text_to_speech.py index 0b55c4512..a75495dc0 100644 --- a/examples/speaker_text_to_speech.py +++ b/examples/speaker_text_to_speech.py @@ -5,22 +5,14 @@ # passed in the request. When the service responds with the synthesized # audio, the pyaudio would play it in a blocking mode -from __future__ import print_function from ibm_watson import TextToSpeechV1 from ibm_watson.websocket import SynthesizeCallback import pyaudio +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator -# If service instance provides API key authentication -service = TextToSpeechV1( - ## url is optional, and defaults to the URL below. Use the correct URL for your region. - url='https://stream.watsonplatform.net/text-to-speech/api', - iam_apikey='your_apikey') - -# service = TextToSpeechV1( -# ## url is optional, and defaults to the URL below. Use the correct URL for your region. 
-#     # url='https://stream.watsonplatform.net/text-to-speech/api,
-#     username='YOUR SERVICE USERNAME',
-#     password='YOUR SERVICE PASSWORD')
+authenticator = IAMAuthenticator('your_api_key')
+service = TextToSpeechV1(authenticator=authenticator)
+service.set_service_url('https://api.us-south.text-to-speech.watson.cloud.ibm.com')

 class Play(object):
     """
diff --git a/examples/speech_to_text_v1.py b/examples/speech_to_text_v1.py
index b2a267c7d..e0b60a33b 100644
--- a/examples/speech_to_text_v1.py
+++ b/examples/speech_to_text_v1.py
@@ -1,20 +1,13 @@
-from __future__ import print_function
 import json
 from os.path import join, dirname
 from ibm_watson import SpeechToTextV1
 from ibm_watson.websocket import RecognizeCallback, AudioSource
 import threading
+from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

-# If service instance provides API key authentication
-service = SpeechToTextV1(
-    ## url is optional, and defaults to the URL below. Use the correct URL for your region.
-    url='https://stream.watsonplatform.net/speech-to-text/api',
-    iam_apikey='YOUR APIKEY')
-
-# service = SpeechToTextV1(
-#     username='YOUR SERVICE USERNAME',
-#     password='YOUR SERVICE PASSWORD',
-#     url='https://stream.watsonplatform.net/speech-to-text/api')
+authenticator = IAMAuthenticator('your_api_key')
+service = SpeechToTextV1(authenticator=authenticator)
+service.set_service_url('https://api.us-south.speech-to-text.watson.cloud.ibm.com')

 models = service.list_models().get_result()
 print(json.dumps(models, indent=2))
diff --git a/examples/text_to_speech_v1.py b/examples/text_to_speech_v1.py
index bca6882e7..0571592df 100644
--- a/examples/text_to_speech_v1.py
+++ b/examples/text_to_speech_v1.py
@@ -1,21 +1,13 @@
 # coding=utf-8
-from __future__ import print_function
 import json
 from os.path import join, dirname
 from ibm_watson import TextToSpeechV1
 from ibm_watson.websocket import SynthesizeCallback
+from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

-# If service instance provides API key authentication
-service = TextToSpeechV1(
-    ## url is optional, and defaults to the URL below. Use the correct URL for your region.
-    url='https://stream.watsonplatform.net/text-to-speech/api',
-    iam_apikey='YOUR APIKEY')
-
-# service = TextToSpeechV1(
-#     ## url is optional, and defaults to the URL below. Use the correct URL for your region.
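The speech_to_text_v1.py hunk above switches to the IAMAuthenticator plus set_service_url pattern and then only lists models. A minimal editorial sketch of transcribing a local audio file with the same client pattern follows; the file path, model name, and API key are illustrative placeholders, not part of this change.

# Minimal sketch: Speech to Text recognition with the new authenticator pattern (placeholder values).
import json
from os.path import join, dirname
from ibm_watson import SpeechToTextV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('your_api_key')
service = SpeechToTextV1(authenticator=authenticator)
service.set_service_url('https://api.us-south.speech-to-text.watson.cloud.ibm.com')

# Transcribe a local WAV file; the path and model are placeholders.
with open(join(dirname(__file__), '../resources/speech.wav'), 'rb') as audio_file:
    recognize_result = service.recognize(
        audio=audio_file,
        content_type='audio/wav',
        model='en-US_BroadbandModel').get_result()
print(json.dumps(recognize_result, indent=2))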
-# # url='https://stream.watsonplatform.net/text-to-speech/api, -# username='YOUR SERVICE USERNAME', -# password='YOUR SERVICE PASSWORD') +authenticator = IAMAuthenticator('your_api_key') +service = TextToSpeechV1(authenticator=authenticator) +service.set_service_url('https://api.us-south.text-to-speech.watson.cloud.ibm.com') voices = service.list_voices().get_result() print(json.dumps(voices, indent=2)) @@ -30,18 +22,18 @@ pronunciation = service.get_pronunciation('Watson', format='spr').get_result() print(json.dumps(pronunciation, indent=2)) -voice_models = service.list_voice_models().get_result() +voice_models = service.list_custom_models().get_result() print(json.dumps(voice_models, indent=2)) -# voice_model = service.create_voice_model('test-customization').get_result() -# print(json.dumps(voice_model, indent=2)) +# voice_model = service.create_custom_model('test-customization').get_result() +# print(json.dumps(custom_model, indent=2)) -# updated_voice_model = service.update_voice_model( +# updated_custom_model = service.update_custom_model( # 'YOUR CUSTOMIZATION ID', name='new name').get_result() -# print(updated_voice_model) +# print(updated_custom_model) -# voice_model = service.get_voice_model('YOUR CUSTOMIZATION ID').get_result() -# print(json.dumps(voice_model, indent=2)) +# custom_model = service.get_custom_model('YOUR CUSTOMIZATION ID').get_result() +# print(json.dumps(custom_model, indent=2)) # words = service.list_words('YOUR CUSTOMIZATIONID').get_result() # print(json.dumps(words, indent=2)) diff --git a/examples/tone_analyzer_v3.py b/examples/tone_analyzer_v3.py deleted file mode 100755 index 17a56e085..000000000 --- a/examples/tone_analyzer_v3.py +++ /dev/null @@ -1,89 +0,0 @@ -from __future__ import print_function -import json -from os.path import join, dirname -from ibm_watson import ToneAnalyzerV3 -from ibm_watson.tone_analyzer_v3 import ToneInput - -# If service instance provides API key authentication -service = ToneAnalyzerV3( - ## url is optional, and defaults to the URL below. Use the correct URL for your region. - url='https://gateway.watsonplatform.net/tone-analyzer/api', - version='2017-09-21', - iam_apikey='YOU APIKEY') - -# service = ToneAnalyzerV3( -# ## url is optional, and defaults to the URL below. Use the correct URL for your region. -# # url='https://gateway.watsonplatform.net/tone-analyzer/api', -# username='YOUR SERVICE USERNAME', -# password='YOUR SERVICE PASSWORD', -# version='2017-09-21') - -print("\ntone_chat() example 1:\n") -utterances = [{ - 'text': 'I am very happy.', - 'user': 'glenn' -}, { - 'text': 'It is a good day.', - 'user': 'glenn' -}] -tone_chat = service.tone_chat(utterances).get_result() -print(json.dumps(tone_chat, indent=2)) - -print("\ntone() example 1:\n") -print( - json.dumps( - service.tone( - tone_input='I am very happy. 
It is a good day.', - content_type="text/plain").get_result(), - indent=2)) - -print("\ntone() example 2:\n") -with open(join(dirname(__file__), - '../resources/tone-example.json')) as tone_json: - tone = service.tone(json.load(tone_json)['text'], content_type="text/plain").get_result() -print(json.dumps(tone, indent=2)) - -print("\ntone() example 3:\n") -with open(join(dirname(__file__), - '../resources/tone-example.json')) as tone_json: - tone = service.tone( - tone_input=json.load(tone_json)['text'], - content_type='text/plain', - sentences=True).get_result() -print(json.dumps(tone, indent=2)) - -print("\ntone() example 4:\n") -with open(join(dirname(__file__), - '../resources/tone-example.json')) as tone_json: - tone = service.tone( - tone_input=json.load(tone_json), - content_type='application/json').get_result() -print(json.dumps(tone, indent=2)) - -print("\ntone() example 5:\n") -with open(join(dirname(__file__), - '../resources/tone-example-html.json')) as tone_html: - tone = service.tone( - json.load(tone_html)['text'], - content_type='text/html').get_result() -print(json.dumps(tone, indent=2)) - -print("\ntone() example 6 with GDPR support:\n") -with open(join(dirname(__file__), - '../resources/tone-example-html.json')) as tone_html: - tone = service.tone( - json.load(tone_html)['text'], - content_type='text/html', - headers={ - 'Custom-Header': 'custom_value' - }) - -print(tone) -print(tone.get_headers()) -print(tone.get_result()) -print(tone.get_status_code()) - -print("\ntone() example 7:\n") -tone_input = ToneInput('I am very happy. It is a good day.') -tone = service.tone(tone_input=tone_input, content_type="application/json").get_result() -print(json.dumps(tone, indent=2)) diff --git a/examples/visual_recognition_v3.py b/examples/visual_recognition_v3.py deleted file mode 100644 index f33ea577a..000000000 --- a/examples/visual_recognition_v3.py +++ /dev/null @@ -1,60 +0,0 @@ -from __future__ import print_function -import json -from os.path import abspath -from ibm_watson import VisualRecognitionV3, ApiException - -test_url = 'https://www.ibm.com/ibm/ginni/images' \ - '/ginni_bio_780x981_v4_03162016.jpg' - -# If service instance provides IAM API key authentication -service = VisualRecognitionV3( - '2018-03-19', - ## url is optional, and defaults to the URL below. Use the correct URL for your region. 
- url='https://gateway.watsonplatform.net/visual-recognition/api', - iam_apikey='YOUR APIKEY') - -# with open(abspath('resources/cars.zip'), 'rb') as cars, \ -# open(abspath('resources/trucks.zip'), 'rb') as trucks: -# classifier = service.create_classifier('Cars vs Trucks', -# positive_examples={'cars': cars}, -# negative_examples=trucks).get_result() -# print(json.dumps(classifier, indent=2)) - -car_path = abspath("resources/cars.zip") -try: - with open(car_path, 'rb') as images_file: - car_results = service.classify( - images_file=images_file, - threshold='0.1', - classifier_ids=['default']).get_result() - print(json.dumps(car_results, indent=2)) -except ApiException as ex: - print(ex) - -# classifier = service.get_classifier('YOUR CLASSIFIER ID').get_result() -# print(json.dumps(classifier, indent=2)) - -# with open(abspath('resources/car.jpg'), 'rb') as image_file: -# classifier = service.update_classifier('CarsvsTrucks_1479118188', -# positive_examples={'cars_positive_examples': image_file}).get_result() -# print(json.dumps(classifier, indent=2)) - -# faces_result = service.detect_faces(url=test_url).get_result() -# print(json.dumps(faces_result, indent=2)) - -# response = service.delete_classifier(classifier_id='YOUR CLASSIFIER ID').get_result() -# print(json.dumps(response, indent=2)) - -classifiers = service.list_classifiers().get_result() -print(json.dumps(classifiers, indent=2)) - -face_path = abspath('resources/face.jpg') -with open(face_path, 'rb') as image_file: - face_result = service.detect_faces(images_file=image_file).get_result() - print(json.dumps(face_result, indent=2)) - -#Core ml model example -# model_name = '{0}.mlmodel'.format(classifier_id) -# core_ml_model = service.get_core_ml_model(classifier_id).get_result() -# with open('/tmp/{0}'.format(model_name), 'wb') as fp: -# fp.write(core_ml_model.content) diff --git a/ibm_watson/__init__.py b/ibm_watson/__init__.py index 10dff2368..aaa767998 100755 --- a/ibm_watson/__init__.py +++ b/ibm_watson/__init__.py @@ -1,5 +1,5 @@ # coding: utf-8 -# Copyright 2016 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2016, 2020. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,18 +15,11 @@ from ibm_cloud_sdk_core import IAMTokenManager, DetailedResponse, BaseService, ApiException -from .authorization_v1 import AuthorizationV1 from .assistant_v1 import AssistantV1 from .assistant_v2 import AssistantV2 -from .language_translator_v3 import LanguageTranslatorV3 -from .natural_language_classifier_v1 import NaturalLanguageClassifierV1 from .natural_language_understanding_v1 import NaturalLanguageUnderstandingV1 -from .personality_insights_v3 import PersonalityInsightsV3 from .text_to_speech_v1 import TextToSpeechV1 -from .tone_analyzer_v3 import ToneAnalyzerV3 -from .discovery_v1 import DiscoveryV1 -from .compare_comply_v1 import CompareComplyV1 -from .visual_recognition_v3 import VisualRecognitionV3 +from .discovery_v2 import DiscoveryV2 from .version import __version__ from .common import get_sdk_headers from .speech_to_text_v1_adapter import SpeechToTextV1Adapter as SpeechToTextV1 diff --git a/ibm_watson/assistant_v1.py b/ibm_watson/assistant_v1.py index 3627a7273..7528355d4 100644 --- a/ibm_watson/assistant_v1.py +++ b/ibm_watson/assistant_v1.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2019, 2024. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,18 +13,31 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +# IBM OpenAPI SDK Code Generator Version: 3.97.0-0e90eab1-20241120-170029 """ The IBM Watson™ Assistant service combines machine learning, natural language -understanding, and integrated dialog tools to create conversation flows between your apps -and your users. -""" +understanding, and an integrated dialog editor to create conversation flows between your +apps and your users. +The Assistant v1 API provides authoring methods your application can use to create or +update a workspace. -from __future__ import absolute_import +API Version: 1.0 +See: https://cloud.ibm.com/docs/assistant +""" +from datetime import datetime +from enum import Enum +from typing import Dict, List, Optional import json +import sys + +from ibm_cloud_sdk_core import BaseService, DetailedResponse +from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator +from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment +from ibm_cloud_sdk_core.utils import convert_model, datetime_to_string, string_to_datetime + from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService -from ibm_cloud_sdk_core import datetime_to_string, string_to_datetime ############################################################################## # Service @@ -34,138 +47,122 @@ class AssistantV1(BaseService): """The Assistant V1 service.""" - default_url = 'https://gateway.watsonplatform.net/assistant/api' + DEFAULT_SERVICE_URL = 'https://api.us-south.assistant.watson.cloud.ibm.com' + DEFAULT_SERVICE_NAME = 'assistant' def __init__( - self, - version, - url=default_url, - username=None, - password=None, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): + self, + version: str, + authenticator: Authenticator = None, + service_name: str = DEFAULT_SERVICE_NAME, + ) -> None: """ Construct a new client for the Assistant service. - :param str version: The API version date to use with the service, in - "YYYY-MM-DD" format. Whenever the API is changed in a backwards - incompatible way, a new minor version of the API is released. - The service uses the API version for the date you specify, or - the most recent version before that date. Note that you should - not programmatically specify the current date at runtime, in - case the API has been updated since your application's release. - Instead, specify a version date that is compatible with your - application, and don't change it until your application is - ready for a later version. - - :param str url: The base url to use when contacting the service (e.g. - "https://gateway.watsonplatform.net/assistant/api/assistant/api"). - The base url may differ between IBM Cloud regions. - - :param str username: The username used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str password: The password used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. 
When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. - - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. Defaults to - 'https://iam.cloud.ibm.com/identity/token'. - """ - - BaseService.__init__( - self, - vcap_services_name='conversation', - url=url, - username=username, - password=password, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Assistant') + :param str version: Release date of the API version you want to use. + Specify dates in YYYY-MM-DD format. The current version is `2021-11-27`. + + :param Authenticator authenticator: The authenticator specifies the authentication mechanism. + Get up to date information from https://github.com/IBM/python-sdk-core/blob/main/README.md + about initializing the authenticator of your choice. + """ + if version is None: + raise ValueError('version must be provided') + + if not authenticator: + authenticator = get_authenticator_from_environment(service_name) + BaseService.__init__(self, + service_url=self.DEFAULT_SERVICE_URL, + authenticator=authenticator) self.version = version + self.configure_service(service_name) ######################### # Message ######################### - def message(self, - workspace_id, - input=None, - intents=None, - entities=None, - alternate_intents=None, - context=None, - output=None, - nodes_visited_details=None, - **kwargs): + def message( + self, + workspace_id: str, + *, + input: Optional['MessageInput'] = None, + intents: Optional[List['RuntimeIntent']] = None, + entities: Optional[List['RuntimeEntity']] = None, + alternate_intents: Optional[bool] = None, + context: Optional['Context'] = None, + output: Optional['OutputData'] = None, + user_id: Optional[str] = None, + nodes_visited_details: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ Get response to user input. Send user input to a workspace and receive a response. - There is no rate limit for this operation. + **Important:** This method has been superseded by the new v2 runtime API. The v2 + API offers significant advantages, including ease of deployment, automatic state + management, versioning, and search capabilities. For more information, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-api-overview). :param str workspace_id: Unique identifier of the workspace. - :param MessageInput input: An input object that includes the input text. - :param list[RuntimeIntent] intents: Intents to use when evaluating the user input. - Include intents from the previous response to continue using those intents rather - than trying to recognize intents in the new input. - :param list[RuntimeEntity] entities: Entities to use when evaluating the message. - Include entities from the previous response to continue using those entities - rather than detecting entities in the new input. - :param bool alternate_intents: Whether to return more than one intent. A value of - `true` indicates that all matching intents are returned. 
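As a minimal sketch of the reworked constructor above, assuming an IBM Cloud instance: the API key is a placeholder, the version string is the current version named in the docstring, and the URL is the default shown in DEFAULT_SERVICE_URL.

# Minimal sketch: constructing the Assistant v1 client with the new authenticator-based signature.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('your_api_key')  # placeholder API key
assistant = AssistantV1(version='2021-11-27', authenticator=authenticator)
assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com')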
- :param Context context: State information for the conversation. To maintain state, - include the context from the previous response. - :param OutputData output: An output object that includes the response to the user, - the dialog nodes that were triggered, and messages from the log. - :param bool nodes_visited_details: Whether to include additional diagnostic - information about the dialog nodes that were visited during processing of the - message. + :param MessageInput input: (optional) An input object that includes the + input text. + :param List[RuntimeIntent] intents: (optional) Intents to use when + evaluating the user input. Include intents from the previous response to + continue using those intents rather than trying to recognize intents in the + new input. + :param List[RuntimeEntity] entities: (optional) Entities to use when + evaluating the message. Include entities from the previous response to + continue using those entities rather than detecting entities in the new + input. + :param bool alternate_intents: (optional) Whether to return more than one + intent. A value of `true` indicates that all matching intents are returned. + :param Context context: (optional) State information for the conversation. + To maintain state, include the context from the previous response. + :param OutputData output: (optional) An output object that includes the + response to the user, the dialog nodes that were triggered, and messages + from the log. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the workspace. The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. If no value is specified in the input, **user_id** is + automatically set to the value of **context.conversation_id**. + **Note:** This property is the same as the **user_id** property in the + context metadata. If **user_id** is specified in both locations in a + message request, the value specified at the root is used. + :param bool nodes_visited_details: (optional) Whether to include additional + diagnostic information about the dialog nodes that were visited during + processing of the message. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `MessageResponse` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') if input is not None: - input = self._convert_model(input, MessageInput) + input = convert_model(input) if intents is not None: - intents = [self._convert_model(x, RuntimeIntent) for x in intents] + intents = [convert_model(x) for x in intents] if entities is not None: - entities = [self._convert_model(x, RuntimeEntity) for x in entities] + entities = [convert_model(x) for x in entities] if context is not None: - context = self._convert_model(context, Context) + context = convert_model(context) if output is not None: - output = self._convert_model(output, OutputData) - + output = convert_model(output) headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'message') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='message', + ) headers.update(sdk_headers) params = { 'version': self.version, - 'nodes_visited_details': nodes_visited_details + 'nodes_visited_details': nodes_visited_details, } data = { @@ -174,558 +171,892 @@ def message(self, 'entities': entities, 'alternate_intents': alternate_intents, 'context': context, - 'output': output + 'output': output, + 'user_id': user_id, } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}/message'.format( - *self._encode_path_vars(workspace_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/message'.format(**path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response ######################### - # Workspaces + # Bulk classify ######################### - def create_workspace(self, - name=None, - description=None, - language=None, - metadata=None, - learning_opt_out=None, - system_settings=None, - intents=None, - entities=None, - dialog_nodes=None, - counterexamples=None, - **kwargs): + def bulk_classify( + self, + workspace_id: str, + *, + input: Optional[List['BulkClassifyUtterance']] = None, + **kwargs, + ) -> DetailedResponse: """ - Create workspace. + Identify intents and entities in multiple user utterances. - Create a workspace based on component objects. You must provide workspace - components defining the content of the new workspace. - This operation is limited to 30 requests per 30 minutes. For more information, see - **Rate limiting**. - - :param str name: The name of the workspace. This string cannot contain carriage - return, newline, or tab characters, and it must be no longer than 64 characters. - :param str description: The description of the workspace. This string cannot - contain carriage return, newline, or tab characters, and it must be no longer than - 128 characters. - :param str language: The language of the workspace. - :param dict metadata: Any metadata related to the workspace. 
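A minimal sketch of calling the reworked message method, assuming the `assistant` client from the previous sketch; the workspace ID and input text are placeholders, and passing plain dicts relies on the SDK's model conversion.

# Minimal sketch: sending user input to a workspace (placeholder workspace ID and text).
import json

message_response = assistant.message(
    workspace_id='your_workspace_id',
    input={'text': 'Hello'},
    alternate_intents=True).get_result()
print(json.dumps(message_response, indent=2))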
- :param bool learning_opt_out: Whether training data from the workspace (including - artifacts such as intents and entities) can be used by IBM for general service - improvements. `true` indicates that workspace training data is not to be used. - :param WorkspaceSystemSettings system_settings: Global settings for the workspace. - :param list[CreateIntent] intents: An array of objects defining the intents for - the workspace. - :param list[CreateEntity] entities: An array of objects describing the entities - for the workspace. - :param list[DialogNode] dialog_nodes: An array of objects describing the dialog - nodes in the workspace. - :param list[Counterexample] counterexamples: An array of objects defining input - examples that have been marked as irrelevant input. + Send multiple user inputs to a workspace in a single request and receive + information about the intents and entities recognized in each input. This method + is useful for testing and comparing the performance of different workspaces. + This method is available only with Enterprise with Data Isolation plans. + + :param str workspace_id: Unique identifier of the workspace. + :param List[BulkClassifyUtterance] input: (optional) An array of input + utterances to classify. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `BulkClassifyResponse` object """ - if system_settings is not None: - system_settings = self._convert_model(system_settings, - WorkspaceSystemSettings) - if intents is not None: - intents = [self._convert_model(x, CreateIntent) for x in intents] - if entities is not None: - entities = [self._convert_model(x, CreateEntity) for x in entities] - if dialog_nodes is not None: - dialog_nodes = [ - self._convert_model(x, DialogNode) for x in dialog_nodes - ] - if counterexamples is not None: - counterexamples = [ - self._convert_model(x, Counterexample) for x in counterexamples - ] - + if not workspace_id: + raise ValueError('workspace_id must be provided') + if input is not None: + input = [convert_model(x) for x in input] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'create_workspace') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='bulk_classify', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + } data = { - 'name': name, - 'description': description, - 'language': language, - 'metadata': metadata, - 'learning_opt_out': learning_opt_out, - 'system_settings': system_settings, - 'intents': intents, - 'entities': entities, - 'dialog_nodes': dialog_nodes, - 'counterexamples': counterexamples + 'input': input, } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces' - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/bulk_classify'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, 
headers=headers, params=params, - json=data, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def delete_workspace(self, workspace_id, **kwargs): + ######################### + # Workspaces + ######################### + + def list_workspaces( + self, + *, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Delete workspace. + List workspaces. - Delete a workspace from the service instance. - This operation is limited to 30 requests per 30 minutes. For more information, see - **Rate limiting**. + List the workspaces associated with a Watson Assistant service instance. - :param str workspace_id: Unique identifier of the workspace. + :param int page_limit: (optional) The number of records to return in each + page of results. + :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned workspaces will + be sorted. To reverse the sort order, prefix the value with a minus sign + (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `WorkspaceCollection` object """ - if workspace_id is None: - raise ValueError('workspace_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'delete_workspace') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_workspaces', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'page_limit': page_limit, + 'include_count': include_count, + 'sort': sort, + 'cursor': cursor, + 'include_audit': include_audit, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' - url = '/v1/workspaces/{0}'.format(*self._encode_path_vars(workspace_id)) - response = self.request( - method='DELETE', + url = '/v1/workspaces' + request = self.prepare_request( + method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def get_workspace(self, - workspace_id, - export=None, - include_audit=None, - sort=None, - **kwargs): + def create_workspace( + self, + *, + name: Optional[str] = None, + description: Optional[str] = None, + language: Optional[str] = None, + dialog_nodes: Optional[List['DialogNode']] = None, + counterexamples: Optional[List['Counterexample']] = None, + metadata: Optional[dict] = None, + learning_opt_out: Optional[bool] = None, + system_settings: Optional['WorkspaceSystemSettings'] = None, + webhooks: Optional[List['Webhook']] = None, + intents: Optional[List['CreateIntent']] = None, + entities: 
Optional[List['CreateEntity']] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Get information about a workspace. - - Get information about a workspace, optionally including all workspace content. - With **export**=`false`, this operation is limited to 6000 requests per 5 minutes. - With **export**=`true`, the limit is 20 requests per 30 minutes. For more - information, see **Rate limiting**. + Create workspace. - :param str workspace_id: Unique identifier of the workspace. - :param bool export: Whether to include all element content in the returned data. - If **export**=`false`, the returned data includes only information about the - element itself. If **export**=`true`, all content, including subelements, is - included. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. - :param str sort: Indicates how the returned workspace data will be sorted. This - parameter is valid only if **export**=`true`. Specify `sort=stable` to sort all - workspace objects by unique identifier, in ascending alphabetical order. + Create a workspace based on component objects. You must provide workspace + components defining the content of the new workspace. + **Note:** The new workspace data cannot be larger than 1.5 MB. For larger + requests, use the **Create workspace asynchronously** method. + + :param str name: (optional) The name of the workspace. This string cannot + contain carriage return, newline, or tab characters. + :param str description: (optional) The description of the workspace. This + string cannot contain carriage return, newline, or tab characters. + :param str language: (optional) The language of the workspace. + :param List[DialogNode] dialog_nodes: (optional) An array of objects + describing the dialog nodes in the workspace. + :param List[Counterexample] counterexamples: (optional) An array of objects + defining input examples that have been marked as irrelevant input. + :param dict metadata: (optional) Any metadata related to the workspace. + :param bool learning_opt_out: (optional) Whether training data from the + workspace (including artifacts such as intents and entities) can be used by + IBM for general service improvements. `true` indicates that workspace + training data is not to be used. + :param WorkspaceSystemSettings system_settings: (optional) Global settings + for the workspace. + :param List[Webhook] webhooks: (optional) + :param List[CreateIntent] intents: (optional) An array of objects defining + the intents for the workspace. + :param List[CreateEntity] entities: (optional) An array of objects + describing the entities for the workspace. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Workspace` object """ - if workspace_id is None: - raise ValueError('workspace_id must be provided') - + if dialog_nodes is not None: + dialog_nodes = [convert_model(x) for x in dialog_nodes] + if counterexamples is not None: + counterexamples = [convert_model(x) for x in counterexamples] + if system_settings is not None: + system_settings = convert_model(system_settings) + if webhooks is not None: + webhooks = [convert_model(x) for x in webhooks] + if intents is not None: + intents = [convert_model(x) for x in intents] + if entities is not None: + entities = [convert_model(x) for x in entities] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'get_workspace') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_workspace', + ) headers.update(sdk_headers) params = { 'version': self.version, - 'export': export, 'include_audit': include_audit, - 'sort': sort } - url = '/v1/workspaces/{0}'.format(*self._encode_path_vars(workspace_id)) - response = self.request( - method='GET', + data = { + 'name': name, + 'description': description, + 'language': language, + 'dialog_nodes': dialog_nodes, + 'counterexamples': counterexamples, + 'metadata': metadata, + 'learning_opt_out': learning_opt_out, + 'system_settings': system_settings, + 'webhooks': webhooks, + 'intents': intents, + 'entities': entities, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/workspaces' + request = self.prepare_request( + method='POST', url=url, headers=headers, params=params, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def list_workspaces(self, - page_limit=None, - include_count=None, - sort=None, - cursor=None, - include_audit=None, - **kwargs): + def get_workspace( + self, + workspace_id: str, + *, + export: Optional[bool] = None, + include_audit: Optional[bool] = None, + sort: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ - List workspaces. + Get information about a workspace. - List the workspaces associated with a Watson Assistant service instance. - This operation is limited to 500 requests per 30 minutes. For more information, - see **Rate limiting**. - - :param int page_limit: The number of records to return in each page of results. - :param bool include_count: Whether to include information about the number of - records returned. - :param str sort: The attribute by which returned workspaces will be sorted. To - reverse the sort order, prefix the value with a minus sign (`-`). - :param str cursor: A token identifying the page of results to retrieve. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + Get information about a workspace, optionally including all workspace content. + + :param str workspace_id: Unique identifier of the workspace. + :param bool export: (optional) Whether to include all element content in + the returned data. If **export**=`false`, the returned data includes only + information about the element itself. If **export**=`true`, all content, + including subelements, is included. 
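A minimal sketch of the export option described above, continuing with the same `assistant` client; the workspace ID is a placeholder.

# Minimal sketch: exporting the full content of a workspace, including subelements.
workspace = assistant.get_workspace(
    workspace_id='your_workspace_id',
    export=True,
    include_audit=True).get_result()
print(json.dumps(workspace, indent=2))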
+ :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param str sort: (optional) Indicates how the returned workspace data will + be sorted. This parameter is valid only if **export**=`true`. Specify + `sort=stable` to sort all workspace objects by unique identifier, in + ascending alphabetical order. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Workspace` object """ + if not workspace_id: + raise ValueError('workspace_id must be provided') headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'list_workspaces') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_workspace', + ) headers.update(sdk_headers) params = { 'version': self.version, - 'page_limit': page_limit, - 'include_count': include_count, + 'export': export, + 'include_audit': include_audit, 'sort': sort, - 'cursor': cursor, - 'include_audit': include_audit } - url = '/v1/workspaces' - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}'.format(**path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def update_workspace(self, - workspace_id, - name=None, - description=None, - language=None, - metadata=None, - learning_opt_out=None, - system_settings=None, - intents=None, - entities=None, - dialog_nodes=None, - counterexamples=None, - append=None, - **kwargs): + def update_workspace( + self, + workspace_id: str, + *, + name: Optional[str] = None, + description: Optional[str] = None, + language: Optional[str] = None, + dialog_nodes: Optional[List['DialogNode']] = None, + counterexamples: Optional[List['Counterexample']] = None, + metadata: Optional[dict] = None, + learning_opt_out: Optional[bool] = None, + system_settings: Optional['WorkspaceSystemSettings'] = None, + webhooks: Optional[List['Webhook']] = None, + intents: Optional[List['CreateIntent']] = None, + entities: Optional[List['CreateEntity']] = None, + append: Optional[bool] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ Update workspace. Update an existing workspace with new or modified data. You must provide component objects defining the content of the updated workspace. - This operation is limited to 30 request per 30 minutes. For more information, see - **Rate limiting**. + **Note:** The new workspace data cannot be larger than 1.5 MB. For larger + requests, use the **Update workspace asynchronously** method. :param str workspace_id: Unique identifier of the workspace. - :param str name: The name of the workspace. This string cannot contain carriage - return, newline, or tab characters, and it must be no longer than 64 characters. - :param str description: The description of the workspace. 
This string cannot - contain carriage return, newline, or tab characters, and it must be no longer than - 128 characters. - :param str language: The language of the workspace. - :param dict metadata: Any metadata related to the workspace. - :param bool learning_opt_out: Whether training data from the workspace (including - artifacts such as intents and entities) can be used by IBM for general service - improvements. `true` indicates that workspace training data is not to be used. - :param WorkspaceSystemSettings system_settings: Global settings for the workspace. - :param list[CreateIntent] intents: An array of objects defining the intents for - the workspace. - :param list[CreateEntity] entities: An array of objects describing the entities - for the workspace. - :param list[DialogNode] dialog_nodes: An array of objects describing the dialog - nodes in the workspace. - :param list[Counterexample] counterexamples: An array of objects defining input - examples that have been marked as irrelevant input. - :param bool append: Whether the new data is to be appended to the existing data in - the workspace. If **append**=`false`, elements included in the new data completely - replace the corresponding existing elements, including all subelements. For - example, if the new data includes **entities** and **append**=`false`, all - existing entities in the workspace are discarded and replaced with the new - entities. - If **append**=`true`, existing elements are preserved, and the new elements are - added. If any elements in the new data collide with existing elements, the update - request fails. + :param str name: (optional) The name of the workspace. This string cannot + contain carriage return, newline, or tab characters. + :param str description: (optional) The description of the workspace. This + string cannot contain carriage return, newline, or tab characters. + :param str language: (optional) The language of the workspace. + :param List[DialogNode] dialog_nodes: (optional) An array of objects + describing the dialog nodes in the workspace. + :param List[Counterexample] counterexamples: (optional) An array of objects + defining input examples that have been marked as irrelevant input. + :param dict metadata: (optional) Any metadata related to the workspace. + :param bool learning_opt_out: (optional) Whether training data from the + workspace (including artifacts such as intents and entities) can be used by + IBM for general service improvements. `true` indicates that workspace + training data is not to be used. + :param WorkspaceSystemSettings system_settings: (optional) Global settings + for the workspace. + :param List[Webhook] webhooks: (optional) + :param List[CreateIntent] intents: (optional) An array of objects defining + the intents for the workspace. + :param List[CreateEntity] entities: (optional) An array of objects + describing the entities for the workspace. + :param bool append: (optional) Whether the new data is to be appended to + the existing data in the object. If **append**=`false`, elements included + in the new data completely replace the corresponding existing elements, + including all subelements. For example, if the new data for a workspace + includes **entities** and **append**=`false`, all existing entities in the + workspace are discarded and replaced with the new entities. + If **append**=`true`, existing elements are preserved, and the new elements + are added. If any elements in the new data collide with existing elements, + the update request fails. 
+ :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Workspace` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') + if dialog_nodes is not None: + dialog_nodes = [convert_model(x) for x in dialog_nodes] + if counterexamples is not None: + counterexamples = [convert_model(x) for x in counterexamples] if system_settings is not None: - system_settings = self._convert_model(system_settings, - WorkspaceSystemSettings) + system_settings = convert_model(system_settings) + if webhooks is not None: + webhooks = [convert_model(x) for x in webhooks] if intents is not None: - intents = [self._convert_model(x, CreateIntent) for x in intents] + intents = [convert_model(x) for x in intents] if entities is not None: - entities = [self._convert_model(x, CreateEntity) for x in entities] - if dialog_nodes is not None: - dialog_nodes = [ - self._convert_model(x, DialogNode) for x in dialog_nodes - ] - if counterexamples is not None: - counterexamples = [ - self._convert_model(x, Counterexample) for x in counterexamples - ] - + entities = [convert_model(x) for x in entities] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'update_workspace') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_workspace', + ) headers.update(sdk_headers) - params = {'version': self.version, 'append': append} + params = { + 'version': self.version, + 'append': append, + 'include_audit': include_audit, + } data = { 'name': name, 'description': description, 'language': language, + 'dialog_nodes': dialog_nodes, + 'counterexamples': counterexamples, 'metadata': metadata, 'learning_opt_out': learning_opt_out, 'system_settings': system_settings, + 'webhooks': webhooks, 'intents': intents, 'entities': entities, - 'dialog_nodes': dialog_nodes, - 'counterexamples': counterexamples } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}'.format(*self._encode_path_vars(workspace_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}'.format(**path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) - return response + data=data, + ) - ######################### - # Intents - ######################### + response = self.send(request, **kwargs) + return response - def create_intent(self, - workspace_id, - intent, - description=None, - examples=None, - **kwargs): + def delete_workspace( + self, + workspace_id: str, + **kwargs, + ) -> DetailedResponse: """ - Create intent. + Delete workspace. - Create a new intent. - This operation is limited to 2000 requests per 30 minutes. For more information, - see **Rate limiting**. 
+ Delete a workspace from the service instance. :param str workspace_id: Unique identifier of the workspace. - :param str intent: The name of the intent. This string must conform to the - following restrictions: - - It can contain only Unicode alphanumeric, underscore, hyphen, and dot - characters. - - It cannot begin with the reserved prefix `sys-`. - - It must be no longer than 128 characters. - :param str description: The description of the intent. This string cannot contain - carriage return, newline, or tab characters, and it must be no longer than 128 - characters. - :param list[Example] examples: An array of user input examples for the intent. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if intent is None: - raise ValueError('intent must be provided') - if examples is not None: - examples = [self._convert_model(x, Example) for x in examples] - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'create_intent') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_workspace', + ) headers.update(sdk_headers) - params = {'version': self.version} - - data = { - 'intent': intent, - 'description': description, - 'examples': examples + params = { + 'version': self.version, } - url = '/v1/workspaces/{0}/intents'.format( - *self._encode_path_vars(workspace_id)) - response = self.request( - method='POST', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}'.format(**path_param_dict) + request = self.prepare_request( + method='DELETE', url=url, headers=headers, params=params, - json=data, - accept_json=True) - return response - - def delete_intent(self, workspace_id, intent, **kwargs): - """ - Delete intent. + ) - Delete an intent from a workspace. - This operation is limited to 2000 requests per 30 minutes. For more information, - see **Rate limiting**. + response = self.send(request, **kwargs) + return response - :param str workspace_id: Unique identifier of the workspace. - :param str intent: The intent name. + def create_workspace_async( + self, + *, + name: Optional[str] = None, + description: Optional[str] = None, + language: Optional[str] = None, + dialog_nodes: Optional[List['DialogNode']] = None, + counterexamples: Optional[List['Counterexample']] = None, + metadata: Optional[dict] = None, + learning_opt_out: Optional[bool] = None, + system_settings: Optional['WorkspaceSystemSettings'] = None, + webhooks: Optional[List['Webhook']] = None, + intents: Optional[List['CreateIntent']] = None, + entities: Optional[List['CreateEntity']] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create workspace asynchronously. + + Create a workspace asynchronously based on component objects. You must provide + workspace components defining the content of the new workspace. + A successful call to this method only initiates asynchronous creation of the + workspace. The new workspace is not available until processing completes. 
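Deleting a workspace needs only the workspace ID. A short sketch, reusing the placeholder `assistant` client from the first sketch above:

    # assistant: AssistantV1 client configured as in the first sketch above.
    response = assistant.delete_workspace(workspace_id='YOUR_WORKSPACE_ID')
    print(response.get_status_code())  # 200 on success; the result body is empty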
To check + the status of the asynchronous operation, use the **Get information about a + workspace** method. + + :param str name: (optional) The name of the workspace. This string cannot + contain carriage return, newline, or tab characters. + :param str description: (optional) The description of the workspace. This + string cannot contain carriage return, newline, or tab characters. + :param str language: (optional) The language of the workspace. + :param List[DialogNode] dialog_nodes: (optional) An array of objects + describing the dialog nodes in the workspace. + :param List[Counterexample] counterexamples: (optional) An array of objects + defining input examples that have been marked as irrelevant input. + :param dict metadata: (optional) Any metadata related to the workspace. + :param bool learning_opt_out: (optional) Whether training data from the + workspace (including artifacts such as intents and entities) can be used by + IBM for general service improvements. `true` indicates that workspace + training data is not to be used. + :param WorkspaceSystemSettings system_settings: (optional) Global settings + for the workspace. + :param List[Webhook] webhooks: (optional) + :param List[CreateIntent] intents: (optional) An array of objects defining + the intents for the workspace. + :param List[CreateEntity] entities: (optional) An array of objects + describing the entities for the workspace. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Workspace` object """ - if workspace_id is None: - raise ValueError('workspace_id must be provided') - if intent is None: - raise ValueError('intent must be provided') - + if dialog_nodes is not None: + dialog_nodes = [convert_model(x) for x in dialog_nodes] + if counterexamples is not None: + counterexamples = [convert_model(x) for x in counterexamples] + if system_settings is not None: + system_settings = convert_model(system_settings) + if webhooks is not None: + webhooks = [convert_model(x) for x in webhooks] + if intents is not None: + intents = [convert_model(x) for x in intents] + if entities is not None: + entities = [convert_model(x) for x in entities] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'delete_intent') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_workspace_async', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + } - url = '/v1/workspaces/{0}/intents/{1}'.format( - *self._encode_path_vars(workspace_id, intent)) - response = self.request( - method='DELETE', + data = { + 'name': name, + 'description': description, + 'language': language, + 'dialog_nodes': dialog_nodes, + 'counterexamples': counterexamples, + 'metadata': metadata, + 'learning_opt_out': learning_opt_out, + 'system_settings': system_settings, + 'webhooks': webhooks, + 'intents': intents, + 'entities': entities, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/workspaces_async' + request = self.prepare_request( + method='POST', url=url, 
headers=headers, params=params, - accept_json=True) - return response + data=data, + ) - def get_intent(self, - workspace_id, - intent, - export=None, - include_audit=None, - **kwargs): - """ - Get intent. + response = self.send(request, **kwargs) + return response - Get information about an intent, optionally including all intent content. - With **export**=`false`, this operation is limited to 6000 requests per 5 minutes. - With **export**=`true`, the limit is 400 requests per 30 minutes. For more - information, see **Rate limiting**. + def update_workspace_async( + self, + workspace_id: str, + *, + name: Optional[str] = None, + description: Optional[str] = None, + language: Optional[str] = None, + dialog_nodes: Optional[List['DialogNode']] = None, + counterexamples: Optional[List['Counterexample']] = None, + metadata: Optional[dict] = None, + learning_opt_out: Optional[bool] = None, + system_settings: Optional['WorkspaceSystemSettings'] = None, + webhooks: Optional[List['Webhook']] = None, + intents: Optional[List['CreateIntent']] = None, + entities: Optional[List['CreateEntity']] = None, + append: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update workspace asynchronously. + + Update an existing workspace asynchronously with new or modified data. You must + provide component objects defining the content of the updated workspace. + A successful call to this method only initiates an asynchronous update of the + workspace. The updated workspace is not available until processing completes. To + check the status of the asynchronous operation, use the **Get information about a + workspace** method. :param str workspace_id: Unique identifier of the workspace. - :param str intent: The intent name. - :param bool export: Whether to include all element content in the returned data. - If **export**=`false`, the returned data includes only information about the - element itself. If **export**=`true`, all content, including subelements, is - included. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param str name: (optional) The name of the workspace. This string cannot + contain carriage return, newline, or tab characters. + :param str description: (optional) The description of the workspace. This + string cannot contain carriage return, newline, or tab characters. + :param str language: (optional) The language of the workspace. + :param List[DialogNode] dialog_nodes: (optional) An array of objects + describing the dialog nodes in the workspace. + :param List[Counterexample] counterexamples: (optional) An array of objects + defining input examples that have been marked as irrelevant input. + :param dict metadata: (optional) Any metadata related to the workspace. + :param bool learning_opt_out: (optional) Whether training data from the + workspace (including artifacts such as intents and entities) can be used by + IBM for general service improvements. `true` indicates that workspace + training data is not to be used. + :param WorkspaceSystemSettings system_settings: (optional) Global settings + for the workspace. + :param List[Webhook] webhooks: (optional) + :param List[CreateIntent] intents: (optional) An array of objects defining + the intents for the workspace. + :param List[CreateEntity] entities: (optional) An array of objects + describing the entities for the workspace. + :param bool append: (optional) Whether the new data is to be appended to + the existing data in the object. 
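Because `create_workspace_async` only initiates creation, the caller has to poll for completion separately. A sketch under the assumptions that the placeholder `assistant` client from the first sketch is reused and that the unchanged `get_workspace` method elsewhere in this module reports a `status` of `Available` once processing finishes:

    import time

    from ibm_watson.assistant_v1 import CreateIntent, Example

    # assistant: AssistantV1 client configured as in the first sketch above.
    created = assistant.create_workspace_async(
        name='Large imported workspace',
        language='en',
        intents=[CreateIntent(intent='greetings',
                              examples=[Example(text='hello'),
                                        Example(text='hi there')])],
    ).get_result()

    # Poll "Get information about a workspace" until the new workspace is ready.
    workspace_id = created['workspace_id']
    while assistant.get_workspace(
            workspace_id=workspace_id).get_result().get('status') != 'Available':
        time.sleep(5)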
If **append**=`false`, elements included + in the new data completely replace the corresponding existing elements, + including all subelements. For example, if the new data for a workspace + includes **entities** and **append**=`false`, all existing entities in the + workspace are discarded and replaced with the new entities. + If **append**=`true`, existing elements are preserved, and the new elements + are added. If any elements in the new data collide with existing elements, + the update request fails. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Workspace` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if intent is None: - raise ValueError('intent must be provided') - + if dialog_nodes is not None: + dialog_nodes = [convert_model(x) for x in dialog_nodes] + if counterexamples is not None: + counterexamples = [convert_model(x) for x in counterexamples] + if system_settings is not None: + system_settings = convert_model(system_settings) + if webhooks is not None: + webhooks = [convert_model(x) for x in webhooks] + if intents is not None: + intents = [convert_model(x) for x in intents] + if entities is not None: + entities = [convert_model(x) for x in entities] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'get_intent') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_workspace_async', + ) headers.update(sdk_headers) params = { 'version': self.version, - 'export': export, - 'include_audit': include_audit + 'append': append, } - url = '/v1/workspaces/{0}/intents/{1}'.format( - *self._encode_path_vars(workspace_id, intent)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_intents(self, - workspace_id, - export=None, - page_limit=None, - include_count=None, - sort=None, - cursor=None, - include_audit=None, - **kwargs): + data = { + 'name': name, + 'description': description, + 'language': language, + 'dialog_nodes': dialog_nodes, + 'counterexamples': counterexamples, + 'metadata': metadata, + 'learning_opt_out': learning_opt_out, + 'system_settings': system_settings, + 'webhooks': webhooks, + 'intents': intents, + 'entities': entities, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces_async/{workspace_id}'.format(**path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def export_workspace_async( + self, + workspace_id: str, + *, + include_audit: Optional[bool] = None, + sort: Optional[str] = None, + verbose: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Export workspace asynchronously. 
+ + Export the entire workspace asynchronously, including all workspace content. + A successful call to this method only initiates an asynchronous export. The + exported JSON data is not available until processing completes. After the initial + request is submitted, you can continue to poll by calling the same request again + and checking the value of the **status** property. When processing has completed, + the request returns the exported JSON data. Remember that the usual rate limits + apply. + + :param str workspace_id: Unique identifier of the workspace. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param str sort: (optional) Indicates how the returned workspace data will + be sorted. Specify `sort=stable` to sort all workspace objects by unique + identifier, in ascending alphabetical order. + :param bool verbose: (optional) Whether the response should include the + `counts` property, which indicates how many of each component (such as + intents and entities) the workspace contains. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Workspace` object + """ + + if not workspace_id: + raise ValueError('workspace_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='export_workspace_async', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + 'sort': sort, + 'verbose': verbose, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces_async/{workspace_id}/export'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Intents + ######################### + + def list_intents( + self, + workspace_id: str, + *, + export: Optional[bool] = None, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ List intents. List the intents for a workspace. - With **export**=`false`, this operation is limited to 2000 requests per 30 - minutes. With **export**=`true`, the limit is 400 requests per 30 minutes. For - more information, see **Rate limiting**. :param str workspace_id: Unique identifier of the workspace. - :param bool export: Whether to include all element content in the returned data. - If **export**=`false`, the returned data includes only information about the - element itself. If **export**=`true`, all content, including subelements, is - included. - :param int page_limit: The number of records to return in each page of results. - :param bool include_count: Whether to include information about the number of - records returned. - :param str sort: The attribute by which returned intents will be sorted. To - reverse the sort order, prefix the value with a minus sign (`-`). 
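The export docstring above says to repeat the same request and check the **status** property until processing completes. A polling sketch; the terminal status value `Available` and the ten-second interval are assumptions, and the client and workspace ID are the usual placeholders:

    import time

    # assistant: AssistantV1 client configured as in the first sketch above.
    while True:
        result = assistant.export_workspace_async(
            workspace_id='YOUR_WORKSPACE_ID',
            include_audit=True,
            sort='stable',
            verbose=True,
        ).get_result()
        if result.get('status') == 'Available':
            break  # processing finished; `result` now holds the exported workspace JSON
        time.sleep(10)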
- :param str cursor: A token identifying the page of results to retrieve. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param bool export: (optional) Whether to include all element content in + the returned data. If **export**=`false`, the returned data includes only + information about the element itself. If **export**=`true`, all content, + including subelements, is included. + :param int page_limit: (optional) The number of records to return in each + page of results. + :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned intents will be + sorted. To reverse the sort order, prefix the value with a minus sign + (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `IntentCollection` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'list_intents') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_intents', + ) headers.update(sdk_headers) params = { @@ -735,275 +1066,378 @@ def list_intents(self, 'include_count': include_count, 'sort': sort, 'cursor': cursor, - 'include_audit': include_audit + 'include_audit': include_audit, } - url = '/v1/workspaces/{0}/intents'.format( - *self._encode_path_vars(workspace_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/intents'.format(**path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def update_intent(self, - workspace_id, - intent, - new_intent=None, - new_description=None, - new_examples=None, - **kwargs): + def create_intent( + self, + workspace_id: str, + intent: str, + *, + description: Optional[str] = None, + examples: Optional[List['Example']] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Update intent. + Create intent. - Update an existing intent with new or modified data. You must provide component - objects defining the content of the updated intent. - This operation is limited to 2000 requests per 30 minutes. For more information, - see **Rate limiting**. + Create a new intent. + If you want to create multiple intents with a single API call, consider using the + **[Update workspace](#update-workspace)** method instead. 
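The pagination parameters documented on `list_intents` (`page_limit`, `include_count`, `cursor`) combine as sketched below. The `next_cursor` field of the `pagination` object is assumed to carry the token for the next page; the workspace ID and client are placeholders:

    # assistant: AssistantV1 client configured as in the first sketch above.
    # Page through all intents, 50 at a time, following the pagination cursor.
    intents = []
    cursor = None
    while True:
        page = assistant.list_intents(
            workspace_id='YOUR_WORKSPACE_ID',
            page_limit=50,
            include_count=True,
            sort='intent',
            cursor=cursor,
        ).get_result()
        intents.extend(page['intents'])
        cursor = page['pagination'].get('next_cursor')
        if not cursor:
            break  # no further pages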
:param str workspace_id: Unique identifier of the workspace. - :param str intent: The intent name. - :param str new_intent: The name of the intent. This string must conform to the - following restrictions: - - It can contain only Unicode alphanumeric, underscore, hyphen, and dot - characters. - - It cannot begin with the reserved prefix `sys-`. - - It must be no longer than 128 characters. - :param str new_description: The description of the intent. This string cannot - contain carriage return, newline, or tab characters, and it must be no longer than - 128 characters. - :param list[Example] new_examples: An array of user input examples for the intent. + :param str intent: The name of the intent. This string must conform to the + following restrictions: + - It can contain only Unicode alphanumeric, underscore, hyphen, and dot + characters. + - It cannot begin with the reserved prefix `sys-`. + :param str description: (optional) The description of the intent. This + string cannot contain carriage return, newline, or tab characters. + :param List[Example] examples: (optional) An array of user input examples + for the intent. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Intent` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') if intent is None: raise ValueError('intent must be provided') - if new_examples is not None: - new_examples = [ - self._convert_model(x, Example) for x in new_examples - ] - + if examples is not None: + examples = [convert_model(x) for x in examples] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'update_intent') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_intent', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'include_audit': include_audit, + } data = { - 'intent': new_intent, - 'description': new_description, - 'examples': new_examples + 'intent': intent, + 'description': description, + 'examples': examples, } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}/intents/{1}'.format( - *self._encode_path_vars(workspace_id, intent)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/intents'.format(**path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) - return response + data=data, + ) - ######################### - # Examples - ######################### + response = self.send(request, **kwargs) + return response - def create_example(self, - workspace_id, - intent, - text, - mentions=None, - **kwargs): + def get_intent( + self, + workspace_id: str, + intent: str, + *, + 
export: Optional[bool] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Create user input example. + Get intent. - Add a new user input example to an intent. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Get information about an intent, optionally including all intent content. :param str workspace_id: Unique identifier of the workspace. :param str intent: The intent name. - :param str text: The text of a user input example. This string must conform to the - following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 1024 characters. - :param list[Mention] mentions: An array of contextual entity mentions. + :param bool export: (optional) Whether to include all element content in + the returned data. If **export**=`false`, the returned data includes only + information about the element itself. If **export**=`true`, all content, + including subelements, is included. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Intent` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if intent is None: + if not intent: raise ValueError('intent must be provided') - if text is None: - raise ValueError('text must be provided') - if mentions is not None: - mentions = [self._convert_model(x, Mention) for x in mentions] - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'create_example') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_intent', + ) headers.update(sdk_headers) - params = {'version': self.version} - - data = {'text': text, 'mentions': mentions} + params = { + 'version': self.version, + 'export': export, + 'include_audit': include_audit, + } - url = '/v1/workspaces/{0}/intents/{1}/examples'.format( - *self._encode_path_vars(workspace_id, intent)) - response = self.request( - method='POST', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'intent'] + path_param_values = self.encode_path_vars(workspace_id, intent) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/intents/{intent}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', url=url, headers=headers, params=params, - json=data, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def delete_example(self, workspace_id, intent, text, **kwargs): + def update_intent( + self, + workspace_id: str, + intent: str, + *, + new_intent: Optional[str] = None, + new_description: Optional[str] = None, + new_examples: Optional[List['Example']] = None, + append: Optional[bool] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Delete user input example. + Update intent. - Delete a user input example from an intent. 
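A sketch of creating an intent and reading it back with `get_intent`, using the signatures defined above; the intent name, description, and examples are illustrative, and the client and workspace ID are placeholders:

    from ibm_watson.assistant_v1 import Example

    # assistant: AssistantV1 client configured as in the first sketch above.
    assistant.create_intent(
        workspace_id='YOUR_WORKSPACE_ID',
        intent='order_status',
        description='Questions about the state of an order',
        examples=[Example(text='Where is my order?'),
                  Example(text='Has my package shipped yet?')],
        include_audit=True,
    )

    # export=True returns the intent together with all of its examples.
    intent = assistant.get_intent(
        workspace_id='YOUR_WORKSPACE_ID',
        intent='order_status',
        export=True,
    ).get_result()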
- This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Update an existing intent with new or modified data. You must provide component + objects defining the content of the updated intent. + If you want to update multiple intents with a single API call, consider using the + **[Update workspace](#update-workspace)** method instead. :param str workspace_id: Unique identifier of the workspace. :param str intent: The intent name. - :param str text: The text of the user input example. + :param str new_intent: (optional) The name of the intent. This string must + conform to the following restrictions: + - It can contain only Unicode alphanumeric, underscore, hyphen, and dot + characters. + - It cannot begin with the reserved prefix `sys-`. + :param str new_description: (optional) The description of the intent. This + string cannot contain carriage return, newline, or tab characters. + :param List[Example] new_examples: (optional) An array of user input + examples for the intent. + :param bool append: (optional) Whether the new data is to be appended to + the existing data in the object. If **append**=`false`, elements included + in the new data completely replace the corresponding existing elements, + including all subelements. For example, if the new data for the intent + includes **examples** and **append**=`false`, all existing examples for the + intent are discarded and replaced with the new examples. + If **append**=`true`, existing elements are preserved, and the new elements + are added. If any elements in the new data collide with existing elements, + the update request fails. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Intent` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if intent is None: + if not intent: raise ValueError('intent must be provided') - if text is None: - raise ValueError('text must be provided') - + if new_examples is not None: + new_examples = [convert_model(x) for x in new_examples] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'delete_example') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_intent', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'append': append, + 'include_audit': include_audit, + } + + data = { + 'intent': new_intent, + 'description': new_description, + 'examples': new_examples, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format( - *self._encode_path_vars(workspace_id, intent, text)) - response = self.request( - method='DELETE', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'intent'] + path_param_values = self.encode_path_vars(workspace_id, intent) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/intents/{intent}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', url=url, headers=headers, params=params, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def get_example(self, - workspace_id, - intent, - text, - include_audit=None, - **kwargs): + def delete_intent( + self, + workspace_id: str, + intent: str, + **kwargs, + ) -> DetailedResponse: """ - Get user input example. + Delete intent. - Get information about a user input example. - This operation is limited to 6000 requests per 5 minutes. For more information, - see **Rate limiting**. + Delete an intent from a workspace. :param str workspace_id: Unique identifier of the workspace. :param str intent: The intent name. - :param str text: The text of the user input example. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
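The new `append` flag on `update_intent` mirrors the workspace-level flag described earlier: a sketch that adds an example without discarding the existing ones (placeholder client, workspace ID, and intent name):

    from ibm_watson.assistant_v1 import Example

    # assistant: AssistantV1 client configured as in the first sketch above.
    # With append=True the existing examples are kept and the new one is added;
    # with append=False the new list would replace the existing examples entirely.
    assistant.update_intent(
        workspace_id='YOUR_WORKSPACE_ID',
        intent='order_status',
        new_examples=[Example(text='What is the status of order 12345?')],
        append=True,
    )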
:rtype: DetailedResponse """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if intent is None: + if not intent: raise ValueError('intent must be provided') - if text is None: - raise ValueError('text must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'get_example') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_intent', + ) headers.update(sdk_headers) - params = {'version': self.version, 'include_audit': include_audit} + params = { + 'version': self.version, + } - url = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format( - *self._encode_path_vars(workspace_id, intent, text)) - response = self.request( - method='GET', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'intent'] + path_param_values = self.encode_path_vars(workspace_id, intent) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/intents/{intent}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def list_examples(self, - workspace_id, - intent, - page_limit=None, - include_count=None, - sort=None, - cursor=None, - include_audit=None, - **kwargs): + ######################### + # Examples + ######################### + + def list_examples( + self, + workspace_id: str, + intent: str, + *, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ List user input examples. List the user input examples for an intent, optionally including contextual entity mentions. - This operation is limited to 2500 requests per 30 minutes. For more information, - see **Rate limiting**. :param str workspace_id: Unique identifier of the workspace. :param str intent: The intent name. - :param int page_limit: The number of records to return in each page of results. - :param bool include_count: Whether to include information about the number of - records returned. - :param str sort: The attribute by which returned examples will be sorted. To - reverse the sort order, prefix the value with a minus sign (`-`). - :param str cursor: A token identifying the page of results to retrieve. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param int page_limit: (optional) The number of records to return in each + page of results. + :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned examples will + be sorted. To reverse the sort order, prefix the value with a minus sign + (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. 
:param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `ExampleCollection` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if intent is None: + if not intent: raise ValueError('intent must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'list_examples') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_examples', + ) headers.update(sdk_headers) params = { @@ -1012,256 +1446,364 @@ def list_examples(self, 'include_count': include_count, 'sort': sort, 'cursor': cursor, - 'include_audit': include_audit + 'include_audit': include_audit, } - url = '/v1/workspaces/{0}/intents/{1}/examples'.format( - *self._encode_path_vars(workspace_id, intent)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'intent'] + path_param_values = self.encode_path_vars(workspace_id, intent) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/intents/{intent}/examples'.format( + **path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def update_example(self, - workspace_id, - intent, - text, - new_text=None, - new_mentions=None, - **kwargs): + def create_example( + self, + workspace_id: str, + intent: str, + text: str, + *, + mentions: Optional[List['Mention']] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Update user input example. + Create user input example. - Update the text of a user input example. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Add a new user input example to an intent. + If you want to add multiple examples with a single API call, consider using the + **[Update intent](#update-intent)** method instead. :param str workspace_id: Unique identifier of the workspace. :param str intent: The intent name. - :param str text: The text of the user input example. - :param str new_text: The text of the user input example. This string must conform - to the following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 1024 characters. - :param list[Mention] new_mentions: An array of contextual entity mentions. + :param str text: The text of a user input example. This string must conform + to the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param List[Mention] mentions: (optional) An array of contextual entity + mentions. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Example` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if intent is None: + if not intent: raise ValueError('intent must be provided') if text is None: raise ValueError('text must be provided') - if new_mentions is not None: - new_mentions = [ - self._convert_model(x, Mention) for x in new_mentions - ] - + if mentions is not None: + mentions = [convert_model(x) for x in mentions] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'update_example') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_example', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'include_audit': include_audit, + } - data = {'text': new_text, 'mentions': new_mentions} + data = { + 'text': text, + 'mentions': mentions, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format( - *self._encode_path_vars(workspace_id, intent, text)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'intent'] + path_param_values = self.encode_path_vars(workspace_id, intent) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/intents/{intent}/examples'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) - return response + data=data, + ) - ######################### - # Counterexamples - ######################### + response = self.send(request, **kwargs) + return response - def create_counterexample(self, workspace_id, text, **kwargs): + def get_example( + self, + workspace_id: str, + intent: str, + text: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Create counterexample. + Get user input example. - Add a new counterexample to a workspace. Counterexamples are examples that have - been marked as irrelevant input. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Get information about a user input example. :param str workspace_id: Unique identifier of the workspace. - :param str text: The text of a user input marked as irrelevant input. This string - must conform to the following restrictions: - - It cannot contain carriage return, newline, or tab characters - - It cannot consist of only whitespace characters - - It must be no longer than 1024 characters. + :param str intent: The intent name. + :param str text: The text of the user input example. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
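Contextual entity mentions are the least obvious part of `create_example`. In the sketch below, `location` is assumed to be the `[start, end)` character span of the mention within `text` (so `text[15:24]` is `"pepperoni"`); the entity name and offsets are illustrative:

    from ibm_watson.assistant_v1 import Mention

    # assistant: AssistantV1 client configured as in the first sketch above.
    text = 'I want a large pepperoni pizza'
    assistant.create_example(
        workspace_id='YOUR_WORKSPACE_ID',
        intent='order_pizza',
        text=text,
        # Mark text[15:24] ("pepperoni") as a mention of the @topping entity.
        mentions=[Mention(entity='topping', location=[15, 24])],
        include_audit=True,
    )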
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Example` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if text is None: + if not intent: + raise ValueError('intent must be provided') + if not text: raise ValueError('text must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', - 'create_counterexample') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_example', + ) headers.update(sdk_headers) - params = {'version': self.version} - - data = {'text': text} + params = { + 'version': self.version, + 'include_audit': include_audit, + } - url = '/v1/workspaces/{0}/counterexamples'.format( - *self._encode_path_vars(workspace_id)) - response = self.request( - method='POST', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'intent', 'text'] + path_param_values = self.encode_path_vars(workspace_id, intent, text) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/intents/{intent}/examples/{text}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', url=url, headers=headers, params=params, - json=data, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def delete_counterexample(self, workspace_id, text, **kwargs): + def update_example( + self, + workspace_id: str, + intent: str, + text: str, + *, + new_text: Optional[str] = None, + new_mentions: Optional[List['Mention']] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Delete counterexample. + Update user input example. - Delete a counterexample from a workspace. Counterexamples are examples that have - been marked as irrelevant input. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Update the text of a user input example. + If you want to update multiple examples with a single API call, consider using the + **[Update intent](#update-intent)** method instead. :param str workspace_id: Unique identifier of the workspace. - :param str text: The text of a user input counterexample (for example, `What are - you wearing?`). + :param str intent: The intent name. + :param str text: The text of the user input example. + :param str new_text: (optional) The text of the user input example. This + string must conform to the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param List[Mention] new_mentions: (optional) An array of contextual entity + mentions. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Example` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if text is None: + if not intent: + raise ValueError('intent must be provided') + if not text: raise ValueError('text must be provided') - + if new_mentions is not None: + new_mentions = [convert_model(x) for x in new_mentions] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', - 'delete_counterexample') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_example', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'include_audit': include_audit, + } - url = '/v1/workspaces/{0}/counterexamples/{1}'.format( - *self._encode_path_vars(workspace_id, text)) - response = self.request( - method='DELETE', + data = { + 'text': new_text, + 'mentions': new_mentions, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'intent', 'text'] + path_param_values = self.encode_path_vars(workspace_id, intent, text) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/intents/{intent}/examples/{text}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', url=url, headers=headers, params=params, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def get_counterexample(self, - workspace_id, - text, - include_audit=None, - **kwargs): + def delete_example( + self, + workspace_id: str, + intent: str, + text: str, + **kwargs, + ) -> DetailedResponse: """ - Get counterexample. + Delete user input example. - Get information about a counterexample. Counterexamples are examples that have - been marked as irrelevant input. - This operation is limited to 6000 requests per 5 minutes. For more information, - see **Rate limiting**. + Delete a user input example from an intent. :param str workspace_id: Unique identifier of the workspace. - :param str text: The text of a user input counterexample (for example, `What are - you wearing?`). - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param str intent: The intent name. + :param str text: The text of the user input example. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if text is None: + if not intent: + raise ValueError('intent must be provided') + if not text: raise ValueError('text must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', - 'get_counterexample') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_example', + ) headers.update(sdk_headers) - params = {'version': self.version, 'include_audit': include_audit} + params = { + 'version': self.version, + } - url = '/v1/workspaces/{0}/counterexamples/{1}'.format( - *self._encode_path_vars(workspace_id, text)) - response = self.request( - method='GET', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'intent', 'text'] + path_param_values = self.encode_path_vars(workspace_id, intent, text) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/intents/{intent}/examples/{text}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def list_counterexamples(self, - workspace_id, - page_limit=None, - include_count=None, - sort=None, - cursor=None, - include_audit=None, - **kwargs): + ######################### + # Counterexamples + ######################### + + def list_counterexamples( + self, + workspace_id: str, + *, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ List counterexamples. List the counterexamples for a workspace. Counterexamples are examples that have been marked as irrelevant input. - This operation is limited to 2500 requests per 30 minutes. For more information, - see **Rate limiting**. :param str workspace_id: Unique identifier of the workspace. - :param int page_limit: The number of records to return in each page of results. - :param bool include_count: Whether to include information about the number of - records returned. - :param str sort: The attribute by which returned counterexamples will be sorted. - To reverse the sort order, prefix the value with a minus sign (`-`). - :param str cursor: A token identifying the page of results to retrieve. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param int page_limit: (optional) The number of records to return in each + page of results. + :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned counterexamples + will be sorted. To reverse the sort order, prefix the value with a minus + sign (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. 
:param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `CounterexampleCollection` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', - 'list_counterexamples') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_counterexamples', + ) headers.update(sdk_headers) params = { @@ -1270,275 +1812,345 @@ def list_counterexamples(self, 'include_count': include_count, 'sort': sort, 'cursor': cursor, - 'include_audit': include_audit + 'include_audit': include_audit, } - url = '/v1/workspaces/{0}/counterexamples'.format( - *self._encode_path_vars(workspace_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/counterexamples'.format( + **path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def update_counterexample(self, workspace_id, text, new_text=None, - **kwargs): + def create_counterexample( + self, + workspace_id: str, + text: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Update counterexample. + Create counterexample. - Update the text of a counterexample. Counterexamples are examples that have been - marked as irrelevant input. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Add a new counterexample to a workspace. Counterexamples are examples that have + been marked as irrelevant input. + If you want to add multiple counterexamples with a single API call, consider using + the **[Update workspace](#update-workspace)** method instead. :param str workspace_id: Unique identifier of the workspace. - :param str text: The text of a user input counterexample (for example, `What are - you wearing?`). - :param str new_text: The text of a user input marked as irrelevant input. This - string must conform to the following restrictions: - - It cannot contain carriage return, newline, or tab characters - - It cannot consist of only whitespace characters - - It must be no longer than 1024 characters. + :param str text: The text of a user input marked as irrelevant input. This + string must conform to the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Counterexample` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') if text is None: raise ValueError('text must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', - 'update_counterexample') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_counterexample', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'include_audit': include_audit, + } - data = {'text': new_text} + data = { + 'text': text, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}/counterexamples/{1}'.format( - *self._encode_path_vars(workspace_id, text)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/counterexamples'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) - return response + data=data, + ) - ######################### - # Entities - ######################### + response = self.send(request, **kwargs) + return response - def create_entity(self, - workspace_id, - entity, - description=None, - metadata=None, - fuzzy_match=None, - values=None, - **kwargs): + def get_counterexample( + self, + workspace_id: str, + text: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Create entity. + Get counterexample. - Create a new entity, or enable a system entity. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Get information about a counterexample. Counterexamples are examples that have + been marked as irrelevant input. :param str workspace_id: Unique identifier of the workspace. - :param str entity: The name of the entity. This string must conform to the - following restrictions: - - It can contain only Unicode alphanumeric, underscore, and hyphen characters. - - It must be no longer than 64 characters. - If you specify an entity name beginning with the reserved prefix `sys-`, it must - be the name of a system entity that you want to enable. (Any entity content - specified with the request is ignored.). - :param str description: The description of the entity. This string cannot contain - carriage return, newline, or tab characters, and it must be no longer than 128 - characters. - :param dict metadata: Any metadata related to the entity. - :param bool fuzzy_match: Whether to use fuzzy matching for the entity. - :param list[CreateValue] values: An array of objects describing the entity values. + :param str text: The text of a user input counterexample (for example, + `What are you wearing?`). + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. 
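Counterexamples follow the same pattern. A sketch that marks an irrelevant input and then lists the workspace's counterexamples (placeholder client and workspace ID):

    # assistant: AssistantV1 client configured as in the first sketch above.
    assistant.create_counterexample(
        workspace_id='YOUR_WORKSPACE_ID',
        text='What are you wearing?',
        include_audit=True,
    )
    counterexamples = assistant.list_counterexamples(
        workspace_id='YOUR_WORKSPACE_ID',
        include_count=True,
    ).get_result()['counterexamples']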
:param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Counterexample` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: - raise ValueError('entity must be provided') - if values is not None: - values = [self._convert_model(x, CreateValue) for x in values] - + if not text: + raise ValueError('text must be provided') headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'create_entity') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_counterexample', + ) headers.update(sdk_headers) - params = {'version': self.version} - - data = { - 'entity': entity, - 'description': description, - 'metadata': metadata, - 'fuzzy_match': fuzzy_match, - 'values': values + params = { + 'version': self.version, + 'include_audit': include_audit, } - url = '/v1/workspaces/{0}/entities'.format( - *self._encode_path_vars(workspace_id)) - response = self.request( - method='POST', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'text'] + path_param_values = self.encode_path_vars(workspace_id, text) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/counterexamples/{text}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', url=url, headers=headers, params=params, - json=data, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def delete_entity(self, workspace_id, entity, **kwargs): + def update_counterexample( + self, + workspace_id: str, + text: str, + *, + new_text: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Delete entity. + Update counterexample. - Delete an entity from a workspace, or disable a system entity. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Update the text of a counterexample. Counterexamples are examples that have been + marked as irrelevant input. :param str workspace_id: Unique identifier of the workspace. - :param str entity: The name of the entity. + :param str text: The text of a user input counterexample (for example, + `What are you wearing?`). + :param str new_text: (optional) The text of a user input marked as + irrelevant input. This string must conform to the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
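# Illustrative only: a short sketch of the new keyword-only create_counterexample()
# and get_counterexample() signatures shown above. The client setup, example version
# date, placeholder IDs, and the sample counterexample text are assumptions.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

# Create a counterexample; include_audit=True requests the created/updated timestamps.
created = assistant.create_counterexample(
    workspace_id='{workspace_id}',
    text='What are you wearing?',
    include_audit=True,
).get_result()

# Read it back by its text.
fetched = assistant.get_counterexample(
    workspace_id='{workspace_id}',
    text=created['text'],
).get_result()
print(fetched)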
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Counterexample` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: - raise ValueError('entity must be provided') - + if not text: + raise ValueError('text must be provided') headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'delete_entity') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_counterexample', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'include_audit': include_audit, + } + + data = { + 'text': new_text, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}/entities/{1}'.format( - *self._encode_path_vars(workspace_id, entity)) - response = self.request( - method='DELETE', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'text'] + path_param_values = self.encode_path_vars(workspace_id, text) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/counterexamples/{text}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', url=url, headers=headers, params=params, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def get_entity(self, - workspace_id, - entity, - export=None, - include_audit=None, - **kwargs): + def delete_counterexample( + self, + workspace_id: str, + text: str, + **kwargs, + ) -> DetailedResponse: """ - Get entity. + Delete counterexample. - Get information about an entity, optionally including all entity content. - With **export**=`false`, this operation is limited to 6000 requests per 5 minutes. - With **export**=`true`, the limit is 200 requests per 30 minutes. For more - information, see **Rate limiting**. + Delete a counterexample from a workspace. Counterexamples are examples that have + been marked as irrelevant input. :param str workspace_id: Unique identifier of the workspace. - :param str entity: The name of the entity. - :param bool export: Whether to include all element content in the returned data. - If **export**=`false`, the returned data includes only information about the - element itself. If **export**=`true`, all content, including subelements, is - included. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param str text: The text of a user input counterexample (for example, + `What are you wearing?`). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: - raise ValueError('entity must be provided') - + if not text: + raise ValueError('text must be provided') headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'get_entity') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_counterexample', + ) headers.update(sdk_headers) params = { 'version': self.version, - 'export': export, - 'include_audit': include_audit } - url = '/v1/workspaces/{0}/entities/{1}'.format( - *self._encode_path_vars(workspace_id, entity)) - response = self.request( - method='GET', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'text'] + path_param_values = self.encode_path_vars(workspace_id, text) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/counterexamples/{text}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def list_entities(self, - workspace_id, - export=None, - page_limit=None, - include_count=None, - sort=None, - cursor=None, - include_audit=None, - **kwargs): + ######################### + # Entities + ######################### + + def list_entities( + self, + workspace_id: str, + *, + export: Optional[bool] = None, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ List entities. List the entities for a workspace. - With **export**=`false`, this operation is limited to 1000 requests per 30 - minutes. With **export**=`true`, the limit is 200 requests per 30 minutes. For - more information, see **Rate limiting**. :param str workspace_id: Unique identifier of the workspace. - :param bool export: Whether to include all element content in the returned data. - If **export**=`false`, the returned data includes only information about the - element itself. If **export**=`true`, all content, including subelements, is - included. - :param int page_limit: The number of records to return in each page of results. - :param bool include_count: Whether to include information about the number of - records returned. - :param str sort: The attribute by which returned entities will be sorted. To - reverse the sort order, prefix the value with a minus sign (`-`). - :param str cursor: A token identifying the page of results to retrieve. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param bool export: (optional) Whether to include all element content in + the returned data. If **export**=`false`, the returned data includes only + information about the element itself. If **export**=`true`, all content, + including subelements, is included. + :param int page_limit: (optional) The number of records to return in each + page of results. + :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. 
If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned entities will + be sorted. To reverse the sort order, prefix the value with a minus sign + (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `EntityCollection` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'list_entities') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_entities', + ) headers.update(sdk_headers) params = { @@ -1548,371 +2160,468 @@ def list_entities(self, 'include_count': include_count, 'sort': sort, 'cursor': cursor, - 'include_audit': include_audit + 'include_audit': include_audit, } - url = '/v1/workspaces/{0}/entities'.format( - *self._encode_path_vars(workspace_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities'.format(**path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def update_entity(self, - workspace_id, - entity, - new_entity=None, - new_description=None, - new_metadata=None, - new_fuzzy_match=None, - new_values=None, - **kwargs): + def create_entity( + self, + workspace_id: str, + entity: str, + *, + description: Optional[str] = None, + metadata: Optional[dict] = None, + fuzzy_match: Optional[bool] = None, + values: Optional[List['CreateValue']] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Update entity. + Create entity. - Update an existing entity with new or modified data. You must provide component - objects defining the content of the updated entity. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Create a new entity, or enable a system entity. + If you want to create multiple entities with a single API call, consider using the + **[Update workspace](#update-workspace)** method instead. :param str workspace_id: Unique identifier of the workspace. - :param str entity: The name of the entity. - :param str new_entity: The name of the entity. This string must conform to the - following restrictions: - - It can contain only Unicode alphanumeric, underscore, and hyphen characters. - - It cannot begin with the reserved prefix `sys-`. - - It must be no longer than 64 characters. - :param str new_description: The description of the entity. This string cannot - contain carriage return, newline, or tab characters, and it must be no longer than - 128 characters. 
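# Illustrative only: calling the new list_entities() signature above. When
# include_count=True, the docstring notes that the `pagination` object in the
# response carries a `total` property, which this sketch reads. The client setup
# and all placeholder values are assumptions.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

entities = assistant.list_entities(
    workspace_id='{workspace_id}',
    export=False,        # only entity metadata, no subelements
    page_limit=50,
    include_count=True,
    sort='-updated',     # the minus sign reverses the sort order
).get_result()
print('total entities:', entities['pagination']['total'])
for entity in entities['entities']:
    print(entity['entity'])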
- :param dict new_metadata: Any metadata related to the entity. - :param bool new_fuzzy_match: Whether to use fuzzy matching for the entity. - :param list[CreateValue] new_values: An array of objects describing the entity - values. + :param str entity: The name of the entity. This string must conform to the + following restrictions: + - It can contain only Unicode alphanumeric, underscore, and hyphen + characters. + - If you specify an entity name beginning with the reserved prefix `sys-`, + it must be the name of a system entity that you want to enable. (Any entity + content specified with the request is ignored.). + :param str description: (optional) The description of the entity. This + string cannot contain carriage return, newline, or tab characters. + :param dict metadata: (optional) Any metadata related to the entity. + :param bool fuzzy_match: (optional) Whether to use fuzzy matching for the + entity. + :param List[CreateValue] values: (optional) An array of objects describing + the entity values. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Entity` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') if entity is None: raise ValueError('entity must be provided') - if new_values is not None: - new_values = [ - self._convert_model(x, CreateValue) for x in new_values - ] - + if values is not None: + values = [convert_model(x) for x in values] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'update_entity') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_entity', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'include_audit': include_audit, + } data = { - 'entity': new_entity, - 'description': new_description, - 'metadata': new_metadata, - 'fuzzy_match': new_fuzzy_match, - 'values': new_values + 'entity': entity, + 'description': description, + 'metadata': metadata, + 'fuzzy_match': fuzzy_match, + 'values': values, } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}/entities/{1}'.format( - *self._encode_path_vars(workspace_id, entity)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities'.format(**path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) - return response + data=data, + ) - ######################### - # Mentions - ######################### + response = self.send(request, **kwargs) + return response - def list_mentions(self, - workspace_id, - entity, - export=None, - include_audit=None, - **kwargs): + def get_entity( + self, + workspace_id: str, + entity: str, + *, + export: 
Optional[bool] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - List entity mentions. + Get entity. - List mentions for a contextual entity. An entity mention is an occurrence of a - contextual entity in the context of an intent user input example. - This operation is limited to 200 requests per 30 minutes. For more information, - see **Rate limiting**. + Get information about an entity, optionally including all entity content. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. - :param bool export: Whether to include all element content in the returned data. - If **export**=`false`, the returned data includes only information about the - element itself. If **export**=`true`, all content, including subelements, is - included. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param bool export: (optional) Whether to include all element content in + the returned data. If **export**=`false`, the returned data includes only + information about the element itself. If **export**=`true`, all content, + including subelements, is included. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Entity` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: + if not entity: raise ValueError('entity must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'list_mentions') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_entity', + ) headers.update(sdk_headers) params = { 'version': self.version, 'export': export, - 'include_audit': include_audit + 'include_audit': include_audit, } - url = '/v1/workspaces/{0}/entities/{1}/mentions'.format( - *self._encode_path_vars(workspace_id, entity)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity'] + path_param_values = self.encode_path_vars(workspace_id, entity) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}'.format( + **path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) - return response + ) - ######################### - # Values - ######################### + response = self.send(request, **kwargs) + return response - def create_value(self, - workspace_id, - entity, - value, - metadata=None, - value_type=None, - synonyms=None, - patterns=None, - **kwargs): + def update_entity( + self, + workspace_id: str, + entity: str, + *, + new_entity: Optional[str] = None, + new_description: Optional[str] = None, + new_metadata: Optional[dict] = None, + new_fuzzy_match: Optional[bool] = None, + new_values: Optional[List['CreateValue']] = None, + append: Optional[bool] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ 
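# Illustrative only: a sketch of create_entity() and get_entity() as refactored
# above. The CreateValue model import is an assumption based on the List['CreateValue']
# type hint in the new signature; the client setup, version date, and placeholder
# IDs and names are assumptions as well.
from ibm_watson import AssistantV1
from ibm_watson.assistant_v1 import CreateValue
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

assistant.create_entity(
    workspace_id='{workspace_id}',
    entity='beverage',
    description='Drinks a customer can order',
    fuzzy_match=True,
    values=[CreateValue(value='espresso', synonyms=['short black'])],
)

# export=True returns all content, including the values created above.
beverage = assistant.get_entity(
    workspace_id='{workspace_id}',
    entity='beverage',
    export=True,
    include_audit=True,
).get_result()
print(beverage)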
- Create entity value. + Update entity. - Create a new value for an entity. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Update an existing entity with new or modified data. You must provide component + objects defining the content of the updated entity. + If you want to update multiple entities with a single API call, consider using the + **[Update workspace](#update-workspace)** method instead. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. - :param str value: The text of the entity value. This string must conform to the - following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :param dict metadata: Any metadata related to the entity value. - :param str value_type: Specifies the type of entity value. - :param list[str] synonyms: An array of synonyms for the entity value. A value can - specify either synonyms or patterns (depending on the value type), but not both. A - synonym must conform to the following resrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :param list[str] patterns: An array of patterns for the entity value. A value can - specify either synonyms or patterns (depending on the value type), but not both. A - pattern is a regular expression no longer than 512 characters. For more - information about how to specify a pattern, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/entities.html#entities-create-dictionary-based). + :param str new_entity: (optional) The name of the entity. This string must + conform to the following restrictions: + - It can contain only Unicode alphanumeric, underscore, and hyphen + characters. + - It cannot begin with the reserved prefix `sys-`. + :param str new_description: (optional) The description of the entity. This + string cannot contain carriage return, newline, or tab characters. + :param dict new_metadata: (optional) Any metadata related to the entity. + :param bool new_fuzzy_match: (optional) Whether to use fuzzy matching for + the entity. + :param List[CreateValue] new_values: (optional) An array of objects + describing the entity values. + :param bool append: (optional) Whether the new data is to be appended to + the existing data in the entity. If **append**=`false`, elements included + in the new data completely replace the corresponding existing elements, + including all subelements. For example, if the new data for the entity + includes **values** and **append**=`false`, all existing values for the + entity are discarded and replaced with the new values. + If **append**=`true`, existing elements are preserved, and the new elements + are added. If any elements in the new data collide with existing elements, + the update request fails. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Entity` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: + if not entity: raise ValueError('entity must be provided') - if value is None: - raise ValueError('value must be provided') - + if new_values is not None: + new_values = [convert_model(x) for x in new_values] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'create_value') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_entity', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'append': append, + 'include_audit': include_audit, + } data = { - 'value': value, - 'metadata': metadata, - 'type': value_type, - 'synonyms': synonyms, - 'patterns': patterns + 'entity': new_entity, + 'description': new_description, + 'metadata': new_metadata, + 'fuzzy_match': new_fuzzy_match, + 'values': new_values, } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}/entities/{1}/values'.format( - *self._encode_path_vars(workspace_id, entity)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity'] + path_param_values = self.encode_path_vars(workspace_id, entity) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def delete_value(self, workspace_id, entity, value, **kwargs): + def delete_entity( + self, + workspace_id: str, + entity: str, + **kwargs, + ) -> DetailedResponse: """ - Delete entity value. + Delete entity. - Delete a value from an entity. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Delete an entity from a workspace, or disable a system entity. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. - :param str value: The text of the entity value. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
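# Illustrative only: the append semantics described in the update_entity() docstring
# above. With append=True the new values are added alongside the existing ones; with
# append=False the supplied values would replace them entirely. The CreateValue
# import, client setup, and placeholder IDs are assumptions.
from ibm_watson import AssistantV1
from ibm_watson.assistant_v1 import CreateValue
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

assistant.update_entity(
    workspace_id='{workspace_id}',
    entity='beverage',
    new_values=[CreateValue(value='flat white')],
    append=True,          # preserve existing values; only add the new one
    include_audit=True,
)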
:rtype: DetailedResponse """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: + if not entity: raise ValueError('entity must be provided') - if value is None: - raise ValueError('value must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'delete_value') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_entity', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + } - url = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format( - *self._encode_path_vars(workspace_id, entity, value)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity'] + path_param_values = self.encode_path_vars(workspace_id, entity) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}'.format( + **path_param_dict) + request = self.prepare_request( method='DELETE', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def get_value(self, - workspace_id, - entity, - value, - export=None, - include_audit=None, - **kwargs): + ######################### + # Mentions + ######################### + + def list_mentions( + self, + workspace_id: str, + entity: str, + *, + export: Optional[bool] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Get entity value. + List entity mentions. - Get information about an entity value. - This operation is limited to 6000 requests per 5 minutes. For more information, - see **Rate limiting**. + List mentions for a contextual entity. An entity mention is an occurrence of a + contextual entity in the context of an intent user input example. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. - :param str value: The text of the entity value. - :param bool export: Whether to include all element content in the returned data. - If **export**=`false`, the returned data includes only information about the - element itself. If **export**=`true`, all content, including subelements, is - included. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param bool export: (optional) Whether to include all element content in + the returned data. If **export**=`false`, the returned data includes only + information about the element itself. If **export**=`true`, all content, + including subelements, is included. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `EntityMentionCollection` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: + if not entity: raise ValueError('entity must be provided') - if value is None: - raise ValueError('value must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'get_value') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_mentions', + ) headers.update(sdk_headers) params = { 'version': self.version, 'export': export, - 'include_audit': include_audit + 'include_audit': include_audit, } - url = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format( - *self._encode_path_vars(workspace_id, entity, value)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity'] + path_param_values = self.encode_path_vars(workspace_id, entity) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}/mentions'.format( + **path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def list_values(self, - workspace_id, - entity, - export=None, - page_limit=None, - include_count=None, - sort=None, - cursor=None, - include_audit=None, - **kwargs): + ######################### + # Values + ######################### + + def list_values( + self, + workspace_id: str, + entity: str, + *, + export: Optional[bool] = None, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ List entity values. List the values for an entity. - This operation is limited to 2500 requests per 30 minutes. For more information, - see **Rate limiting**. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. - :param bool export: Whether to include all element content in the returned data. - If **export**=`false`, the returned data includes only information about the - element itself. If **export**=`true`, all content, including subelements, is - included. - :param int page_limit: The number of records to return in each page of results. - :param bool include_count: Whether to include information about the number of - records returned. - :param str sort: The attribute by which returned entity values will be sorted. To - reverse the sort order, prefix the value with a minus sign (`-`). - :param str cursor: A token identifying the page of results to retrieve. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param bool export: (optional) Whether to include all element content in + the returned data. If **export**=`false`, the returned data includes only + information about the element itself. If **export**=`true`, all content, + including subelements, is included. + :param int page_limit: (optional) The number of records to return in each + page of results. 
+ :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned entity values + will be sorted. To reverse the sort order, prefix the value with a minus + sign (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `ValueCollection` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: + if not entity: raise ValueError('entity must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'list_values') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_values', + ) headers.update(sdk_headers) params = { @@ -1922,292 +2631,421 @@ def list_values(self, 'include_count': include_count, 'sort': sort, 'cursor': cursor, - 'include_audit': include_audit + 'include_audit': include_audit, } - url = '/v1/workspaces/{0}/entities/{1}/values'.format( - *self._encode_path_vars(workspace_id, entity)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity'] + path_param_values = self.encode_path_vars(workspace_id, entity) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}/values'.format( + **path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def update_value(self, - workspace_id, - entity, - value, - new_value=None, - new_metadata=None, - new_value_type=None, - new_synonyms=None, - new_patterns=None, - **kwargs): + def create_value( + self, + workspace_id: str, + entity: str, + value: str, + *, + metadata: Optional[dict] = None, + type: Optional[str] = None, + synonyms: Optional[List[str]] = None, + patterns: Optional[List[str]] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Update entity value. + Create entity value. - Update an existing entity value with new or modified data. You must provide - component objects defining the content of the updated entity value. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Create a new value for an entity. + If you want to create multiple entity values with a single API call, consider + using the **[Update entity](#update-entity)** method instead. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. - :param str value: The text of the entity value. - :param str new_value: The text of the entity value. 
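# Illustrative only: a cursor-based paging loop over list_values(), using the
# `cursor` parameter documented above. The `next_cursor` field read from the
# pagination object is an assumption about the response shape, as are the client
# setup and every placeholder ID.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

cursor = None
while True:
    page = assistant.list_values(
        workspace_id='{workspace_id}',
        entity='beverage',
        page_limit=20,
        cursor=cursor,     # None on the first request, then the token from the previous page
    ).get_result()
    for value in page['values']:
        print(value['value'])
    cursor = page['pagination'].get('next_cursor')
    if not cursor:         # no further pages
        break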
This string must conform to - the following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :param dict new_metadata: Any metadata related to the entity value. - :param str new_value_type: Specifies the type of entity value. - :param list[str] new_synonyms: An array of synonyms for the entity value. A value - can specify either synonyms or patterns (depending on the value type), but not - both. A synonym must conform to the following resrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :param list[str] new_patterns: An array of patterns for the entity value. A value - can specify either synonyms or patterns (depending on the value type), but not - both. A pattern is a regular expression no longer than 512 characters. For more - information about how to specify a pattern, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/entities.html#entities-create-dictionary-based). + :param str value: The text of the entity value. This string must conform to + the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param dict metadata: (optional) Any metadata related to the entity value. + :param str type: (optional) Specifies the type of entity value. + :param List[str] synonyms: (optional) An array of synonyms for the entity + value. A value can specify either synonyms or patterns (depending on the + value type), but not both. A synonym must conform to the following + resrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param List[str] patterns: (optional) An array of patterns for the entity + value. A value can specify either synonyms or patterns (depending on the + value type), but not both. A pattern is a regular expression; for more + information about how to specify a pattern, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-entities#entities-create-dictionary-based). + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Value` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: + if not entity: raise ValueError('entity must be provided') if value is None: raise ValueError('value must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'update_value') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_value', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'include_audit': include_audit, + } data = { - 'value': new_value, - 'metadata': new_metadata, - 'type': new_value_type, - 'synonyms': new_synonyms, - 'patterns': new_patterns + 'value': value, + 'metadata': metadata, + 'type': type, + 'synonyms': synonyms, + 'patterns': patterns, } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format( - *self._encode_path_vars(workspace_id, entity, value)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity'] + path_param_values = self.encode_path_vars(workspace_id, entity) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}/values'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) - return response + data=data, + ) - ######################### - # Synonyms - ######################### + response = self.send(request, **kwargs) + return response - def create_synonym(self, workspace_id, entity, value, synonym, **kwargs): + def get_value( + self, + workspace_id: str, + entity: str, + value: str, + *, + export: Optional[bool] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Create entity value synonym. + Get entity value. - Add a new synonym to an entity value. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Get information about an entity value. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. :param str value: The text of the entity value. - :param str synonym: The text of the synonym. This string must conform to the - following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. + :param bool export: (optional) Whether to include all element content in + the returned data. If **export**=`false`, the returned data includes only + information about the element itself. If **export**=`true`, all content, + including subelements, is included. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
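# Illustrative only: creating one synonyms-based and one patterns-based entity value
# with the new create_value() signature above. As the docstring notes, a value can
# carry either synonyms or patterns, not both. The `type` strings, the sample regex,
# the second entity name, and the client setup and placeholder IDs are assumptions.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

# Dictionary-style value identified by synonyms.
assistant.create_value(
    workspace_id='{workspace_id}',
    entity='beverage',
    value='latte',
    type='synonyms',
    synonyms=['caffe latte', 'milk coffee'],
)

# Pattern-style value identified by a regular expression.
assistant.create_value(
    workspace_id='{workspace_id}',
    entity='order_number',
    value='order_id',
    type='patterns',
    patterns=['[A-Z]{2}\\d{6}'],
)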
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Value` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: + if not entity: raise ValueError('entity must be provided') - if value is None: + if not value: raise ValueError('value must be provided') - if synonym is None: - raise ValueError('synonym must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'create_synonym') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_value', + ) headers.update(sdk_headers) - params = {'version': self.version} - - data = {'synonym': synonym} + params = { + 'version': self.version, + 'export': export, + 'include_audit': include_audit, + } - url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms'.format( - *self._encode_path_vars(workspace_id, entity, value)) - response = self.request( - method='POST', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity', 'value'] + path_param_values = self.encode_path_vars(workspace_id, entity, value) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', url=url, headers=headers, params=params, - json=data, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def delete_synonym(self, workspace_id, entity, value, synonym, **kwargs): + def update_value( + self, + workspace_id: str, + entity: str, + value: str, + *, + new_value: Optional[str] = None, + new_metadata: Optional[dict] = None, + new_type: Optional[str] = None, + new_synonyms: Optional[List[str]] = None, + new_patterns: Optional[List[str]] = None, + append: Optional[bool] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Delete entity value synonym. + Update entity value. - Delete a synonym from an entity value. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Update an existing entity value with new or modified data. You must provide + component objects defining the content of the updated entity value. + If you want to update multiple entity values with a single API call, consider + using the **[Update entity](#update-entity)** method instead. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. :param str value: The text of the entity value. - :param str synonym: The text of the synonym. + :param str new_value: (optional) The text of the entity value. This string + must conform to the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param dict new_metadata: (optional) Any metadata related to the entity + value. + :param str new_type: (optional) Specifies the type of entity value. + :param List[str] new_synonyms: (optional) An array of synonyms for the + entity value. A value can specify either synonyms or patterns (depending on + the value type), but not both. 
A synonym must conform to the following + resrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param List[str] new_patterns: (optional) An array of patterns for the + entity value. A value can specify either synonyms or patterns (depending on + the value type), but not both. A pattern is a regular expression; for more + information about how to specify a pattern, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-entities#entities-create-dictionary-based). + :param bool append: (optional) Whether the new data is to be appended to + the existing data in the entity value. If **append**=`false`, elements + included in the new data completely replace the corresponding existing + elements, including all subelements. For example, if the new data for the + entity value includes **synonyms** and **append**=`false`, all existing + synonyms for the entity value are discarded and replaced with the new + synonyms. + If **append**=`true`, existing elements are preserved, and the new elements + are added. If any elements in the new data collide with existing elements, + the update request fails. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Value` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: + if not entity: raise ValueError('entity must be provided') - if value is None: + if not value: raise ValueError('value must be provided') - if synonym is None: - raise ValueError('synonym must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'delete_synonym') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_value', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'append': append, + 'include_audit': include_audit, + } - url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format( - *self._encode_path_vars(workspace_id, entity, value, synonym)) - response = self.request( - method='DELETE', + data = { + 'value': new_value, + 'metadata': new_metadata, + 'type': new_type, + 'synonyms': new_synonyms, + 'patterns': new_patterns, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity', 'value'] + path_param_values = self.encode_path_vars(workspace_id, entity, value) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', url=url, headers=headers, params=params, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def get_synonym(self, - workspace_id, - entity, - value, - synonym, - include_audit=None, - 
**kwargs): + def delete_value( + self, + workspace_id: str, + entity: str, + value: str, + **kwargs, + ) -> DetailedResponse: """ - Get entity value synonym. + Delete entity value. - Get information about a synonym of an entity value. - This operation is limited to 6000 requests per 5 minutes. For more information, - see **Rate limiting**. + Delete a value from an entity. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. :param str value: The text of the entity value. - :param str synonym: The text of the synonym. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: + if not entity: raise ValueError('entity must be provided') - if value is None: + if not value: raise ValueError('value must be provided') - if synonym is None: - raise ValueError('synonym must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'get_synonym') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_value', + ) headers.update(sdk_headers) - params = {'version': self.version, 'include_audit': include_audit} + params = { + 'version': self.version, + } - url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format( - *self._encode_path_vars(workspace_id, entity, value, synonym)) - response = self.request( - method='GET', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity', 'value'] + path_param_values = self.encode_path_vars(workspace_id, entity, value) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def list_synonyms(self, - workspace_id, - entity, - value, - page_limit=None, - include_count=None, - sort=None, - cursor=None, - include_audit=None, - **kwargs): + ######################### + # Synonyms + ######################### + + def list_synonyms( + self, + workspace_id: str, + entity: str, + value: str, + *, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ List entity value synonyms. List the synonyms for an entity value. - This operation is limited to 2500 requests per 30 minutes. For more information, - see **Rate limiting**. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. :param str value: The text of the entity value. - :param int page_limit: The number of records to return in each page of results. - :param bool include_count: Whether to include information about the number of - records returned. 
- :param str sort: The attribute by which returned entity value synonyms will be - sorted. To reverse the sort order, prefix the value with a minus sign (`-`). - :param str cursor: A token identifying the page of results to retrieve. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param int page_limit: (optional) The number of records to return in each + page of results. + :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned entity value + synonyms will be sorted. To reverse the sort order, prefix the value with a + minus sign (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `SynonymCollection` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: + if not entity: raise ValueError('entity must be provided') - if value is None: + if not value: raise ValueError('value must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'list_synonyms') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_synonyms', + ) headers.update(sdk_headers) params = { @@ -2216,327 +3054,372 @@ def list_synonyms(self, 'include_count': include_count, 'sort': sort, 'cursor': cursor, - 'include_audit': include_audit + 'include_audit': include_audit, } - url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms'.format( - *self._encode_path_vars(workspace_id, entity, value)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity', 'value'] + path_param_values = self.encode_path_vars(workspace_id, entity, value) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms'.format( + **path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def update_synonym(self, - workspace_id, - entity, - value, - synonym, - new_synonym=None, - **kwargs): + def create_synonym( + self, + workspace_id: str, + entity: str, + value: str, + synonym: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Update entity value synonym. + Create entity value synonym. - Update an existing entity value synonym with new text. - This operation is limited to 1000 requests per 30 minutes. For more information, - see **Rate limiting**. + Add a new synonym to an entity value. 
+ If you want to create multiple synonyms with a single API call, consider using the + **[Update entity](#update-entity)** or **[Update entity + value](#update-entity-value)** method instead. :param str workspace_id: Unique identifier of the workspace. :param str entity: The name of the entity. :param str value: The text of the entity value. - :param str synonym: The text of the synonym. - :param str new_synonym: The text of the synonym. This string must conform to the - following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. + :param str synonym: The text of the synonym. This string must conform to + the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Synonym` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if entity is None: + if not entity: raise ValueError('entity must be provided') - if value is None: + if not value: raise ValueError('value must be provided') if synonym is None: raise ValueError('synonym must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'update_synonym') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_synonym', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'include_audit': include_audit, + } - data = {'synonym': new_synonym} + data = { + 'synonym': synonym, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format( - *self._encode_path_vars(workspace_id, entity, value, synonym)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity', 'value'] + path_param_values = self.encode_path_vars(workspace_id, entity, value) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) - return response + data=data, + ) - ######################### - # Dialog nodes - ######################### + response = self.send(request, **kwargs) + return response - def create_dialog_node(self, - workspace_id, - dialog_node, - description=None, - conditions=None, - parent=None, - previous_sibling=None, - output=None, - context=None, - metadata=None, - next_step=None, - title=None, - node_type=None, - event_name=None, - variable=None, - actions=None, - digress_in=None, - digress_out=None, - digress_out_slots=None, - user_label=None, - **kwargs): + def get_synonym( + self, + 
workspace_id: str, + entity: str, + value: str, + synonym: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Create dialog node. + Get entity value synonym. - Create a new dialog node. - This operation is limited to 500 requests per 30 minutes. For more information, - see **Rate limiting**. + Get information about a synonym of an entity value. :param str workspace_id: Unique identifier of the workspace. - :param str dialog_node: The dialog node ID. This string must conform to the - following restrictions: - - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot - characters. - - It must be no longer than 1024 characters. - :param str description: The description of the dialog node. This string cannot - contain carriage return, newline, or tab characters, and it must be no longer than - 128 characters. - :param str conditions: The condition that will trigger the dialog node. This - string cannot contain carriage return, newline, or tab characters, and it must be - no longer than 2048 characters. - :param str parent: The ID of the parent dialog node. This property is omitted if - the dialog node has no parent. - :param str previous_sibling: The ID of the previous sibling dialog node. This - property is omitted if the dialog node has no previous sibling. - :param DialogNodeOutput output: The output of the dialog node. For more - information about how to specify dialog node output, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses). - :param dict context: The context for the dialog node. - :param dict metadata: The metadata for the dialog node. - :param DialogNodeNextStep next_step: The next step to execute following this - dialog node. - :param str title: The alias used to identify the dialog node. This string must - conform to the following restrictions: - - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot - characters. - - It must be no longer than 64 characters. - :param str node_type: How the dialog node is processed. - :param str event_name: How an `event_handler` node is processed. - :param str variable: The location in the dialog context where output is stored. - :param list[DialogNodeAction] actions: An array of objects describing any actions - to be invoked by the dialog node. - :param str digress_in: Whether this top-level dialog node can be digressed into. - :param str digress_out: Whether this dialog node can be returned to after a - digression. - :param str digress_out_slots: Whether the user can digress to top-level nodes - while filling out slots. - :param str user_label: A label that can be displayed externally to describe the - purpose of the node to users. This string must be no longer than 512 characters. + :param str entity: The name of the entity. + :param str value: The text of the entity value. + :param str synonym: The text of the synonym. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
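# --- Editor's illustrative sketch (not part of this diff) ---
# Minimal usage of the refactored create_synonym()/get_synonym() calls above.
# The API key, service URL, version date, and workspace/entity/value names are
# placeholders, not values taken from this change set.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('YOUR_APIKEY'))
assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com')

# Add one synonym to an existing entity value, asking for audit timestamps.
created = assistant.create_synonym(
    workspace_id='WORKSPACE_ID',
    entity='appliance',
    value='coffee maker',
    synonym='espresso machine',
    include_audit=True,
).get_result()
print(created['synonym'], created.get('created'))

# Read the same synonym back.
fetched = assistant.get_synonym(
    workspace_id='WORKSPACE_ID',
    entity='appliance',
    value='coffee maker',
    synonym='espresso machine',
).get_result()
print(fetched)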
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Synonym` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if dialog_node is None: - raise ValueError('dialog_node must be provided') - if output is not None: - output = self._convert_model(output, DialogNodeOutput) - if next_step is not None: - next_step = self._convert_model(next_step, DialogNodeNextStep) - if actions is not None: - actions = [ - self._convert_model(x, DialogNodeAction) for x in actions - ] - + if not entity: + raise ValueError('entity must be provided') + if not value: + raise ValueError('value must be provided') + if not synonym: + raise ValueError('synonym must be provided') headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', - 'create_dialog_node') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_synonym', + ) headers.update(sdk_headers) - params = {'version': self.version} - - data = { - 'dialog_node': dialog_node, - 'description': description, - 'conditions': conditions, - 'parent': parent, - 'previous_sibling': previous_sibling, - 'output': output, - 'context': context, - 'metadata': metadata, - 'next_step': next_step, - 'title': title, - 'type': node_type, - 'event_name': event_name, - 'variable': variable, - 'actions': actions, - 'digress_in': digress_in, - 'digress_out': digress_out, - 'digress_out_slots': digress_out_slots, - 'user_label': user_label + params = { + 'version': self.version, + 'include_audit': include_audit, } - url = '/v1/workspaces/{0}/dialog_nodes'.format( - *self._encode_path_vars(workspace_id)) - response = self.request( - method='POST', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity', 'value', 'synonym'] + path_param_values = self.encode_path_vars(workspace_id, entity, value, + synonym) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', url=url, headers=headers, params=params, - json=data, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def delete_dialog_node(self, workspace_id, dialog_node, **kwargs): + def update_synonym( + self, + workspace_id: str, + entity: str, + value: str, + synonym: str, + *, + new_synonym: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Delete dialog node. + Update entity value synonym. - Delete a dialog node from a workspace. - This operation is limited to 500 requests per 30 minutes. For more information, - see **Rate limiting**. + Update an existing entity value synonym with new text. + If you want to update multiple synonyms with a single API call, consider using the + **[Update entity](#update-entity)** or **[Update entity + value](#update-entity-value)** method instead. :param str workspace_id: Unique identifier of the workspace. - :param str dialog_node: The dialog node ID (for example, `get_order`). + :param str entity: The name of the entity. + :param str value: The text of the entity value. + :param str synonym: The text of the synonym. + :param str new_synonym: (optional) The text of the synonym. 
This string + must conform to the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Synonym` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if dialog_node is None: - raise ValueError('dialog_node must be provided') - + if not entity: + raise ValueError('entity must be provided') + if not value: + raise ValueError('value must be provided') + if not synonym: + raise ValueError('synonym must be provided') headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', - 'delete_dialog_node') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_synonym', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'include_audit': include_audit, + } - url = '/v1/workspaces/{0}/dialog_nodes/{1}'.format( - *self._encode_path_vars(workspace_id, dialog_node)) - response = self.request( - method='DELETE', + data = { + 'synonym': new_synonym, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity', 'value', 'synonym'] + path_param_values = self.encode_path_vars(workspace_id, entity, value, + synonym) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', url=url, headers=headers, params=params, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def get_dialog_node(self, - workspace_id, - dialog_node, - include_audit=None, - **kwargs): + def delete_synonym( + self, + workspace_id: str, + entity: str, + value: str, + synonym: str, + **kwargs, + ) -> DetailedResponse: """ - Get dialog node. + Delete entity value synonym. - Get information about a dialog node. - This operation is limited to 6000 requests per 5 minutes. For more information, - see **Rate limiting**. + Delete a synonym from an entity value. :param str workspace_id: Unique identifier of the workspace. - :param str dialog_node: The dialog node ID (for example, `get_order`). - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param str entity: The name of the entity. + :param str value: The text of the entity value. + :param str synonym: The text of the synonym. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
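# --- Editor's illustrative sketch (not part of this diff) ---
# update_synonym() above renames an existing synonym: `synonym` identifies the
# current text and the keyword-only `new_synonym` supplies the replacement.
# Client setup and identifiers below are placeholders.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('YOUR_APIKEY'))

renamed = assistant.update_synonym(
    workspace_id='WORKSPACE_ID',
    entity='appliance',
    value='coffee maker',
    synonym='espresso machine',    # existing synonym text
    new_synonym='espresso maker',  # replacement text
    include_audit=True,
).get_result()
print(renamed['synonym'])          # -> 'espresso maker'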
:rtype: DetailedResponse """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - if dialog_node is None: - raise ValueError('dialog_node must be provided') - + if not entity: + raise ValueError('entity must be provided') + if not value: + raise ValueError('value must be provided') + if not synonym: + raise ValueError('synonym must be provided') headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'get_dialog_node') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_synonym', + ) headers.update(sdk_headers) - params = {'version': self.version, 'include_audit': include_audit} + params = { + 'version': self.version, + } - url = '/v1/workspaces/{0}/dialog_nodes/{1}'.format( - *self._encode_path_vars(workspace_id, dialog_node)) - response = self.request( - method='GET', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'entity', 'value', 'synonym'] + path_param_values = self.encode_path_vars(workspace_id, entity, value, + synonym) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/entities/{entity}/values/{value}/synonyms/{synonym}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def list_dialog_nodes(self, - workspace_id, - page_limit=None, - include_count=None, - sort=None, - cursor=None, - include_audit=None, - **kwargs): + ######################### + # Dialog nodes + ######################### + + def list_dialog_nodes( + self, + workspace_id: str, + *, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ List dialog nodes. List the dialog nodes for a workspace. - This operation is limited to 2500 requests per 30 minutes. For more information, - see **Rate limiting**. :param str workspace_id: Unique identifier of the workspace. - :param int page_limit: The number of records to return in each page of results. - :param bool include_count: Whether to include information about the number of - records returned. - :param str sort: The attribute by which returned dialog nodes will be sorted. To - reverse the sort order, prefix the value with a minus sign (`-`). - :param str cursor: A token identifying the page of results to retrieve. - :param bool include_audit: Whether to include the audit properties (`created` and - `updated` timestamps) in the response. + :param int page_limit: (optional) The number of records to return in each + page of results. + :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned dialog nodes + will be sorted. To reverse the sort order, prefix the value with a minus + sign (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. 
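# --- Editor's illustrative sketch (not part of this diff) ---
# delete_synonym() above returns no body on success, so inspect the HTTP
# status on the DetailedResponse instead. Identifiers are placeholders.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('YOUR_APIKEY'))

response = assistant.delete_synonym(
    workspace_id='WORKSPACE_ID',
    entity='appliance',
    value='coffee maker',
    synonym='espresso maker',
)
print(response.get_status_code())  # a 2xx status means the synonym was removed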
+ :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `DialogNodeCollection` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'list_dialog_nodes') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_dialog_nodes', + ) headers.update(sdk_headers) params = { @@ -2545,119 +3428,371 @@ def list_dialog_nodes(self, 'include_count': include_count, 'sort': sort, 'cursor': cursor, - 'include_audit': include_audit + 'include_audit': include_audit, } - url = '/v1/workspaces/{0}/dialog_nodes'.format( - *self._encode_path_vars(workspace_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/dialog_nodes'.format( + **path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def update_dialog_node(self, - workspace_id, - dialog_node, - new_dialog_node=None, - new_description=None, - new_conditions=None, - new_parent=None, - new_previous_sibling=None, - new_output=None, - new_context=None, - new_metadata=None, - new_next_step=None, - new_title=None, - new_node_type=None, - new_event_name=None, - new_variable=None, - new_actions=None, - new_digress_in=None, - new_digress_out=None, - new_digress_out_slots=None, - new_user_label=None, - **kwargs): + def create_dialog_node( + self, + workspace_id: str, + dialog_node: str, + *, + description: Optional[str] = None, + conditions: Optional[str] = None, + parent: Optional[str] = None, + previous_sibling: Optional[str] = None, + output: Optional['DialogNodeOutput'] = None, + context: Optional['DialogNodeContext'] = None, + metadata: Optional[dict] = None, + next_step: Optional['DialogNodeNextStep'] = None, + title: Optional[str] = None, + type: Optional[str] = None, + event_name: Optional[str] = None, + variable: Optional[str] = None, + actions: Optional[List['DialogNodeAction']] = None, + digress_in: Optional[str] = None, + digress_out: Optional[str] = None, + digress_out_slots: Optional[str] = None, + user_label: Optional[str] = None, + disambiguation_opt_out: Optional[bool] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ - Update dialog node. + Create dialog node. - Update an existing dialog node with new or modified data. - This operation is limited to 500 requests per 30 minutes. For more information, - see **Rate limiting**. + Create a new dialog node. + If you want to create multiple dialog nodes with a single API call, consider using + the **[Update workspace](#update-workspace)** method instead. :param str workspace_id: Unique identifier of the workspace. 
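# --- Editor's illustrative sketch (not part of this diff) ---
# Paging through a workspace with list_dialog_nodes() above. The pagination
# object is assumed to expose `next_cursor` when more pages exist, as in the
# other Watson Assistant v1 list responses; identifiers are placeholders.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('YOUR_APIKEY'))

cursor = None
while True:
    page = assistant.list_dialog_nodes(
        workspace_id='WORKSPACE_ID',
        page_limit=100,
        include_count=True,  # adds pagination['total'] to the response
        sort='updated',
        cursor=cursor,
    ).get_result()
    for node in page['dialog_nodes']:
        print(node['dialog_node'])
    cursor = page['pagination'].get('next_cursor')
    if not cursor:
        break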
- :param str dialog_node: The dialog node ID (for example, `get_order`). - :param str new_dialog_node: The dialog node ID. This string must conform to the - following restrictions: - - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot - characters. - - It must be no longer than 1024 characters. - :param str new_description: The description of the dialog node. This string cannot - contain carriage return, newline, or tab characters, and it must be no longer than - 128 characters. - :param str new_conditions: The condition that will trigger the dialog node. This - string cannot contain carriage return, newline, or tab characters, and it must be - no longer than 2048 characters. - :param str new_parent: The ID of the parent dialog node. This property is omitted - if the dialog node has no parent. - :param str new_previous_sibling: The ID of the previous sibling dialog node. This - property is omitted if the dialog node has no previous sibling. - :param DialogNodeOutput new_output: The output of the dialog node. For more - information about how to specify dialog node output, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses). - :param dict new_context: The context for the dialog node. - :param dict new_metadata: The metadata for the dialog node. - :param DialogNodeNextStep new_next_step: The next step to execute following this - dialog node. - :param str new_title: The alias used to identify the dialog node. This string must - conform to the following restrictions: - - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot - characters. - - It must be no longer than 64 characters. - :param str new_node_type: How the dialog node is processed. - :param str new_event_name: How an `event_handler` node is processed. - :param str new_variable: The location in the dialog context where output is - stored. - :param list[DialogNodeAction] new_actions: An array of objects describing any - actions to be invoked by the dialog node. - :param str new_digress_in: Whether this top-level dialog node can be digressed - into. - :param str new_digress_out: Whether this dialog node can be returned to after a - digression. - :param str new_digress_out_slots: Whether the user can digress to top-level nodes - while filling out slots. - :param str new_user_label: A label that can be displayed externally to describe - the purpose of the node to users. This string must be no longer than 512 - characters. + :param str dialog_node: The unique ID of the dialog node. This is an + internal identifier used to refer to the dialog node from other dialog + nodes and in the diagnostic information included with message responses. + This string can contain only Unicode alphanumeric, space, underscore, + hyphen, and dot characters. + :param str description: (optional) The description of the dialog node. This + string cannot contain carriage return, newline, or tab characters. + :param str conditions: (optional) The condition that will trigger the + dialog node. This string cannot contain carriage return, newline, or tab + characters. + :param str parent: (optional) The unique ID of the parent dialog node. This + property is omitted if the dialog node has no parent. + :param str previous_sibling: (optional) The unique ID of the previous + sibling dialog node. This property is omitted if the dialog node has no + previous sibling. + :param DialogNodeOutput output: (optional) The output of the dialog node. 
+ For more information about how to specify dialog node output, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-dialog-overview#dialog-overview-responses). + :param DialogNodeContext context: (optional) The context for the dialog + node. + :param dict metadata: (optional) The metadata for the dialog node. + :param DialogNodeNextStep next_step: (optional) The next step to execute + following this dialog node. + :param str title: (optional) A human-readable name for the dialog node. If + the node is included in disambiguation, this title is used to populate the + **label** property of the corresponding suggestion in the `suggestion` + response type (unless it is overridden by the **user_label** property). The + title is also used to populate the **topic** property in the + `connect_to_agent` response type. + This string can contain only Unicode alphanumeric, space, underscore, + hyphen, and dot characters. + :param str type: (optional) How the dialog node is processed. + :param str event_name: (optional) How an `event_handler` node is processed. + :param str variable: (optional) The location in the dialog context where + output is stored. + :param List[DialogNodeAction] actions: (optional) An array of objects + describing any actions to be invoked by the dialog node. + :param str digress_in: (optional) Whether this top-level dialog node can be + digressed into. + :param str digress_out: (optional) Whether this dialog node can be returned + to after a digression. + :param str digress_out_slots: (optional) Whether the user can digress to + top-level nodes while filling out slots. + :param str user_label: (optional) A label that can be displayed externally + to describe the purpose of the node to users. If set, this label is used to + identify the node in disambiguation responses (overriding the value of the + **title** property). + :param bool disambiguation_opt_out: (optional) Whether the dialog node + should be excluded from disambiguation suggestions. Valid only when + **type**=`standard` or `frame`. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
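# --- Editor's illustrative sketch (not part of this diff) ---
# Creating a simple text-response node with create_dialog_node() above. The
# `output` payload is passed as a plain dict in the Assistant v1 generic
# response format; a DialogNodeOutput model instance would also be accepted,
# since the method runs optional models through convert_model(). IDs and text
# are placeholders.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('YOUR_APIKEY'))

node = assistant.create_dialog_node(
    workspace_id='WORKSPACE_ID',
    dialog_node='greeting_node',
    conditions='#greeting',
    title='Greeting',
    output={
        'generic': [{
            'response_type': 'text',
            'values': [{'text': 'Hello! How can I help you?'}],
        }],
    },
    include_audit=True,
).get_result()
print(node['dialog_node'], node.get('created'))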
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `DialogNode` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') if dialog_node is None: raise ValueError('dialog_node must be provided') - if new_output is not None: - new_output = self._convert_model(new_output, DialogNodeOutput) - if new_next_step is not None: - new_next_step = self._convert_model(new_next_step, - DialogNodeNextStep) - if new_actions is not None: - new_actions = [ - self._convert_model(x, DialogNodeAction) for x in new_actions - ] + if output is not None: + output = convert_model(output) + if context is not None: + context = convert_model(context) + if next_step is not None: + next_step = convert_model(next_step) + if actions is not None: + actions = [convert_model(x) for x in actions] + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_dialog_node', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + } + + data = { + 'dialog_node': dialog_node, + 'description': description, + 'conditions': conditions, + 'parent': parent, + 'previous_sibling': previous_sibling, + 'output': output, + 'context': context, + 'metadata': metadata, + 'next_step': next_step, + 'title': title, + 'type': type, + 'event_name': event_name, + 'variable': variable, + 'actions': actions, + 'digress_in': digress_in, + 'digress_out': digress_out, + 'digress_out_slots': digress_out_slots, + 'user_label': user_label, + 'disambiguation_opt_out': disambiguation_opt_out, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/dialog_nodes'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def get_dialog_node( + self, + workspace_id: str, + dialog_node: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Get dialog node. + + Get information about a dialog node. + + :param str workspace_id: Unique identifier of the workspace. + :param str dialog_node: The dialog node ID (for example, + `node_1_1479323581900`). + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
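# --- Editor's note on the request-body pattern used above ---
# Optional parameters left as None are stripped before the body is serialized,
# so the service only receives the fields the caller actually set rather than
# explicit nulls. A standalone illustration of the same filtering (build_body
# is a made-up helper, not part of the SDK):
import json

def build_body(**fields):
    """Mirror the `{k: v for ... if v is not None}` filtering used above."""
    return json.dumps({k: v for (k, v) in fields.items() if v is not None})

print(build_body(dialog_node='greeting_node', description=None, title='Greeting'))
# -> {"dialog_node": "greeting_node", "title": "Greeting"}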
+ :rtype: DetailedResponse with `dict` result representing a `DialogNode` object + """ + if not workspace_id: + raise ValueError('workspace_id must be provided') + if not dialog_node: + raise ValueError('dialog_node must be provided') headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_dialog_node', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + } + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', - 'update_dialog_node') + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'dialog_node'] + path_param_values = self.encode_path_vars(workspace_id, dialog_node) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def update_dialog_node( + self, + workspace_id: str, + dialog_node: str, + *, + new_dialog_node: Optional[str] = None, + new_description: Optional[str] = None, + new_conditions: Optional[str] = None, + new_parent: Optional[str] = None, + new_previous_sibling: Optional[str] = None, + new_output: Optional['DialogNodeOutput'] = None, + new_context: Optional['DialogNodeContext'] = None, + new_metadata: Optional[dict] = None, + new_next_step: Optional['DialogNodeNextStep'] = None, + new_title: Optional[str] = None, + new_type: Optional[str] = None, + new_event_name: Optional[str] = None, + new_variable: Optional[str] = None, + new_actions: Optional[List['DialogNodeAction']] = None, + new_digress_in: Optional[str] = None, + new_digress_out: Optional[str] = None, + new_digress_out_slots: Optional[str] = None, + new_user_label: Optional[str] = None, + new_disambiguation_opt_out: Optional[bool] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update dialog node. + + Update an existing dialog node with new or modified data. + If you want to update multiple dialog nodes with a single API call, consider using + the **[Update workspace](#update-workspace)** method instead. + + :param str workspace_id: Unique identifier of the workspace. + :param str dialog_node: The dialog node ID (for example, + `node_1_1479323581900`). + :param str new_dialog_node: (optional) The unique ID of the dialog node. + This is an internal identifier used to refer to the dialog node from other + dialog nodes and in the diagnostic information included with message + responses. + This string can contain only Unicode alphanumeric, space, underscore, + hyphen, and dot characters. + :param str new_description: (optional) The description of the dialog node. + This string cannot contain carriage return, newline, or tab characters. + :param str new_conditions: (optional) The condition that will trigger the + dialog node. This string cannot contain carriage return, newline, or tab + characters. + :param str new_parent: (optional) The unique ID of the parent dialog node. + This property is omitted if the dialog node has no parent. + :param str new_previous_sibling: (optional) The unique ID of the previous + sibling dialog node. This property is omitted if the dialog node has no + previous sibling. 
+ :param DialogNodeOutput new_output: (optional) The output of the dialog + node. For more information about how to specify dialog node output, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-dialog-overview#dialog-overview-responses). + :param DialogNodeContext new_context: (optional) The context for the dialog + node. + :param dict new_metadata: (optional) The metadata for the dialog node. + :param DialogNodeNextStep new_next_step: (optional) The next step to + execute following this dialog node. + :param str new_title: (optional) A human-readable name for the dialog node. + If the node is included in disambiguation, this title is used to populate + the **label** property of the corresponding suggestion in the `suggestion` + response type (unless it is overridden by the **user_label** property). The + title is also used to populate the **topic** property in the + `connect_to_agent` response type. + This string can contain only Unicode alphanumeric, space, underscore, + hyphen, and dot characters. + :param str new_type: (optional) How the dialog node is processed. + :param str new_event_name: (optional) How an `event_handler` node is + processed. + :param str new_variable: (optional) The location in the dialog context + where output is stored. + :param List[DialogNodeAction] new_actions: (optional) An array of objects + describing any actions to be invoked by the dialog node. + :param str new_digress_in: (optional) Whether this top-level dialog node + can be digressed into. + :param str new_digress_out: (optional) Whether this dialog node can be + returned to after a digression. + :param str new_digress_out_slots: (optional) Whether the user can digress + to top-level nodes while filling out slots. + :param str new_user_label: (optional) A label that can be displayed + externally to describe the purpose of the node to users. If set, this label + is used to identify the node in disambiguation responses (overriding the + value of the **title** property). + :param bool new_disambiguation_opt_out: (optional) Whether the dialog node + should be excluded from disambiguation suggestions. Valid only when + **type**=`standard` or `frame`. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
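# --- Editor's illustrative sketch (not part of this diff) ---
# Partially updating a node with update_dialog_node() above. Only the new_*
# values that are provided end up in the request body, so other node
# properties are left alone (subject to the service's update semantics).
# Identifiers are placeholders.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('YOUR_APIKEY'))

updated = assistant.update_dialog_node(
    workspace_id='WORKSPACE_ID',
    dialog_node='greeting_node',
    new_title='Welcome greeting',
    new_disambiguation_opt_out=True,
    include_audit=True,
).get_result()
print(updated.get('title'))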
+ :rtype: DetailedResponse with `dict` result representing a `DialogNode` object + """ + + if not workspace_id: + raise ValueError('workspace_id must be provided') + if not dialog_node: + raise ValueError('dialog_node must be provided') + if new_output is not None: + new_output = convert_model(new_output) + if new_context is not None: + new_context = convert_model(new_context) + if new_next_step is not None: + new_next_step = convert_model(new_next_step) + if new_actions is not None: + new_actions = [convert_model(x) for x in new_actions] + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_dialog_node', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + 'include_audit': include_audit, + } data = { 'dialog_node': new_dialog_node, @@ -2670,147 +3805,256 @@ def update_dialog_node(self, 'metadata': new_metadata, 'next_step': new_next_step, 'title': new_title, - 'type': new_node_type, + 'type': new_type, 'event_name': new_event_name, 'variable': new_variable, 'actions': new_actions, 'digress_in': new_digress_in, 'digress_out': new_digress_out, 'digress_out_slots': new_digress_out_slots, - 'user_label': new_user_label + 'user_label': new_user_label, + 'disambiguation_opt_out': new_disambiguation_opt_out, } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/workspaces/{0}/dialog_nodes/{1}'.format( - *self._encode_path_vars(workspace_id, dialog_node)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'dialog_node'] + path_param_values = self.encode_path_vars(workspace_id, dialog_node) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) - return response + data=data, + ) - ######################### - # Logs - ######################### + response = self.send(request, **kwargs) + return response - def list_all_logs(self, - filter, - sort=None, - page_limit=None, - cursor=None, - **kwargs): + def delete_dialog_node( + self, + workspace_id: str, + dialog_node: str, + **kwargs, + ) -> DetailedResponse: """ - List log events in all workspaces. + Delete dialog node. - List the events from the logs of all workspaces in the service instance. - If **cursor** is not specified, this operation is limited to 40 requests per 30 - minutes. If **cursor** is specified, the limit is 120 requests per minute. For - more information, see **Rate limiting**. - - :param str filter: A cacheable parameter that limits the results to those matching - the specified filter. You must specify a filter query that includes a value for - `language`, as well as a value for `workspace_id` or - `request.context.metadata.deployment`. For more information, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/filter-reference.html#filter-reference-syntax). - :param str sort: How to sort the returned log events. You can sort by - **request_timestamp**. To reverse the sort order, prefix the parameter value with - a minus sign (`-`). - :param int page_limit: The number of records to return in each page of results. 
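# --- Editor's illustrative sketch (not part of this diff) ---
# Removing a node with delete_dialog_node(), whose updated implementation
# follows just below. As with the other delete_* calls, success is indicated
# by the HTTP status rather than a response body. Identifiers are placeholders.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('YOUR_APIKEY'))

response = assistant.delete_dialog_node(
    workspace_id='WORKSPACE_ID',
    dialog_node='greeting_node',
)
print(response.get_status_code())  # a 2xx status means the node was removed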
- :param str cursor: A token identifying the page of results to retrieve. + Delete a dialog node from a workspace. + + :param str workspace_id: Unique identifier of the workspace. + :param str dialog_node: The dialog node ID (for example, + `node_1_1479323581900`). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if filter is None: - raise ValueError('filter must be provided') - + if not workspace_id: + raise ValueError('workspace_id must be provided') + if not dialog_node: + raise ValueError('dialog_node must be provided') headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'list_all_logs') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_dialog_node', + ) headers.update(sdk_headers) params = { 'version': self.version, - 'filter': filter, - 'sort': sort, - 'page_limit': page_limit, - 'cursor': cursor } - url = '/v1/logs' - response = self.request( - method='GET', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id', 'dialog_node'] + path_param_values = self.encode_path_vars(workspace_id, dialog_node) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/dialog_nodes/{dialog_node}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def list_logs(self, - workspace_id, - sort=None, - filter=None, - page_limit=None, - cursor=None, - **kwargs): + ######################### + # Logs + ######################### + + def list_logs( + self, + workspace_id: str, + *, + sort: Optional[str] = None, + filter: Optional[str] = None, + page_limit: Optional[int] = None, + cursor: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ List log events in a workspace. List the events from the log of a specific workspace. - If **cursor** is not specified, this operation is limited to 40 requests per 30 - minutes. If **cursor** is specified, the limit is 120 requests per minute. For - more information, see **Rate limiting**. + This method requires Manager access. + **Note:** If you use the **cursor** parameter to retrieve results one page at a + time, subsequent requests must be no more than 5 minutes apart. Any returned value + for the **cursor** parameter becomes invalid after 5 minutes. For more information + about using pagination, see [Pagination](#pagination). :param str workspace_id: Unique identifier of the workspace. - :param str sort: How to sort the returned log events. You can sort by - **request_timestamp**. To reverse the sort order, prefix the parameter value with - a minus sign (`-`). - :param str filter: A cacheable parameter that limits the results to those matching - the specified filter. For more information, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/filter-reference.html#filter-reference-syntax). - :param int page_limit: The number of records to return in each page of results. - :param str cursor: A token identifying the page of results to retrieve. + :param str sort: (optional) How to sort the returned log events. You can + sort by **request_timestamp**. 
To reverse the sort order, prefix the + parameter value with a minus sign (`-`). + :param str filter: (optional) A cacheable parameter that limits the results + to those matching the specified filter. For more information, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-filter-reference#filter-reference). + :param int page_limit: (optional) The number of records to return in each + page of results. + **Note:** If the API is not returning your data, try lowering the + page_limit value. + :param str cursor: (optional) A token identifying the page of results to + retrieve. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `LogCollection` object """ - if workspace_id is None: + if not workspace_id: raise ValueError('workspace_id must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_logs', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'sort': sort, + 'filter': filter, + 'page_limit': page_limit, + 'cursor': cursor, + } + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'list_logs') + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['workspace_id'] + path_param_values = self.encode_path_vars(workspace_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/workspaces/{workspace_id}/logs'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def list_all_logs( + self, + filter: str, + *, + sort: Optional[str] = None, + page_limit: Optional[int] = None, + cursor: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + List log events in all workspaces. + + List the events from the logs of all workspaces in the service instance. + **Note:** If you use the **cursor** parameter to retrieve results one page at a + time, subsequent requests must be no more than 5 minutes apart. Any returned value + for the **cursor** parameter becomes invalid after 5 minutes. For more information + about using pagination, see [Pagination](#pagination). + + :param str filter: A cacheable parameter that limits the results to those + matching the specified filter. You must specify a filter query that + includes a value for `language`, as well as a value for + `request.context.system.assistant_id`, `workspace_id`, or + `request.context.metadata.deployment`. These required filters must be + specified using the exact match (`::`) operator. For more information, see + the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-filter-reference#filter-reference). + :param str sort: (optional) How to sort the returned log events. You can + sort by **request_timestamp**. To reverse the sort order, prefix the + parameter value with a minus sign (`-`). + :param int page_limit: (optional) The number of records to return in each + page of results. + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
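# --- Editor's illustrative sketch (not part of this diff) ---
# Fetching workspace logs with list_logs() and instance-wide logs with
# list_all_logs(), both shown above. Cursors become invalid after 5 minutes,
# so page promptly. The pagination object is assumed to expose `next_cursor`;
# the filter string follows the Watson Assistant filter reference syntax.
# Identifiers are placeholders.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('YOUR_APIKEY'))

# Logs for one workspace, newest first.
logs = assistant.list_logs(
    workspace_id='WORKSPACE_ID',
    sort='-request_timestamp',
    page_limit=50,
).get_result()
for event in logs['logs']:
    print(event['request_timestamp'], event['log_id'])

# Logs across the instance; `filter` is required and must pin a language plus
# a workspace/assistant/deployment using the exact-match (`::`) operator.
all_logs = assistant.list_all_logs(
    filter='language::en,workspace_id::WORKSPACE_ID',
).get_result()
print(len(all_logs['logs']), all_logs['pagination'].get('next_cursor'))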
+ :rtype: DetailedResponse with `dict` result representing a `LogCollection` object + """ + + if not filter: + raise ValueError('filter must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_all_logs', + ) headers.update(sdk_headers) params = { 'version': self.version, - 'sort': sort, 'filter': filter, + 'sort': sort, 'page_limit': page_limit, - 'cursor': cursor + 'cursor': cursor, } - url = '/v1/workspaces/{0}/logs'.format( - *self._encode_path_vars(workspace_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/logs' + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response ######################### # User data ######################### - def delete_user_data(self, customer_id, **kwargs): + def delete_user_data( + self, + customer_id: str, + **kwargs, + ) -> DetailedResponse: """ Delete labeled data. @@ -2819,4753 +4063,11104 @@ def delete_user_data(self, customer_id, **kwargs): You associate a customer ID with data by passing the `X-Watson-Metadata` header with a request that passes data. For more information about personal data and customer IDs, see [Information - security](https://cloud.ibm.com/docs/services/assistant/information-security.html). - - :param str customer_id: The customer ID for which all data is to be deleted. + security](https://cloud.ibm.com/docs/assistant?topic=assistant-information-security#information-security). + **Note:** This operation is intended only for deleting data associated with a + single specific customer, not for deleting data associated with multiple customers + or for any other purpose. For more information, see [Labeling and deleting data in + Watson + Assistant](https://cloud.ibm.com/docs/assistant?topic=assistant-information-security#information-security-gdpr-wa). + + :param str customer_id: The customer ID for which all data is to be + deleted. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customer_id is None: + if not customer_id: raise ValueError('customer_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V1', 'delete_user_data') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_user_data', + ) headers.update(sdk_headers) - params = {'version': self.version, 'customer_id': customer_id} + params = { + 'version': self.version, + 'customer_id': customer_id, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' url = '/v1/user_data' - response = self.request( + request = self.prepare_request( method='DELETE', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response -############################################################################## -# Models -############################################################################## +class ListWorkspacesEnums: + """ + Enums for list_workspaces parameters. 
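# --- Editor's illustrative sketch (not part of this diff) ---
# Deleting all data associated with one customer ID via delete_user_data()
# above. The ID must match the value previously sent in the X-Watson-Metadata
# header on data-bearing requests; the value here is a placeholder.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('YOUR_APIKEY'))

response = assistant.delete_user_data(customer_id='my-customer-id')
print(response.get_status_code())  # a 2xx status means the request was accepted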
+ """ + + class Sort(str, Enum): + """ + The attribute by which returned workspaces will be sorted. To reverse the sort + order, prefix the value with a minus sign (`-`). + """ + NAME = 'name' + UPDATED = 'updated' -class CaptureGroup(object): - """ - A recognized capture group for a pattern-based entity. - :attr str group: A recognized capture group for the entity. - :attr list[int] location: (optional) Zero-based character offsets that indicate where - the entity value begins and ends in the input text. +class GetWorkspaceEnums: + """ + Enums for get_workspace parameters. """ - def __init__(self, group, location=None): + class Sort(str, Enum): """ - Initialize a CaptureGroup object. - - :param str group: A recognized capture group for the entity. - :param list[int] location: (optional) Zero-based character offsets that indicate - where the entity value begins and ends in the input text. + Indicates how the returned workspace data will be sorted. This parameter is valid + only if **export**=`true`. Specify `sort=stable` to sort all workspace objects by + unique identifier, in ascending alphabetical order. """ - self.group = group - self.location = location - @classmethod - def _from_dict(cls, _dict): - """Initialize a CaptureGroup object from a json dictionary.""" - args = {} - if 'group' in _dict: - args['group'] = _dict.get('group') - else: - raise ValueError( - 'Required property \'group\' not present in CaptureGroup JSON') - if 'location' in _dict: - args['location'] = _dict.get('location') - return cls(**args) + STABLE = 'stable' - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'group') and self.group is not None: - _dict['group'] = self.group - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location - return _dict - def __str__(self): - """Return a `str` version of this CaptureGroup object.""" - return json.dumps(self._to_dict(), indent=2) +class ExportWorkspaceAsyncEnums: + """ + Enums for export_workspace_async parameters. + """ - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ + class Sort(str, Enum): + """ + Indicates how the returned workspace data will be sorted. Specify `sort=stable` to + sort all workspace objects by unique identifier, in ascending alphabetical order. + """ - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other + STABLE = 'stable' -class Context(object): +class ListIntentsEnums: """ - State information for the conversation. To maintain state, include the context from - the previous response. - - :attr str conversation_id: (optional) The unique identifier of the conversation. - :attr SystemResponse system: (optional) For internal use only. - :attr MessageContextMetadata metadata: (optional) Metadata related to the message. + Enums for list_intents parameters. """ - def __init__(self, - conversation_id=None, - system=None, - metadata=None, - **kwargs): + class Sort(str, Enum): """ - Initialize a Context object. - - :param str conversation_id: (optional) The unique identifier of the conversation. - :param SystemResponse system: (optional) For internal use only. - :param MessageContextMetadata metadata: (optional) Metadata related to the - message. - :param **kwargs: (optional) Any additional properties. 
+ The attribute by which returned intents will be sorted. To reverse the sort order, + prefix the value with a minus sign (`-`). """ - self.conversation_id = conversation_id - self.system = system - self.metadata = metadata - for _key, _value in kwargs.items(): - setattr(self, _key, _value) - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Context object from a json dictionary.""" - args = {} - xtra = _dict.copy() - if 'conversation_id' in _dict: - args['conversation_id'] = _dict.get('conversation_id') - del xtra['conversation_id'] - if 'system' in _dict: - args['system'] = SystemResponse._from_dict(_dict.get('system')) - del xtra['system'] - if 'metadata' in _dict: - args['metadata'] = MessageContextMetadata._from_dict( - _dict.get('metadata')) - del xtra['metadata'] - args.update(xtra) - return cls(**args) - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'conversation_id') and self.conversation_id is not None: - _dict['conversation_id'] = self.conversation_id - if hasattr(self, 'system') and self.system is not None: - _dict['system'] = self.system._to_dict() - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata._to_dict() - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value - return _dict + INTENT = 'intent' + UPDATED = 'updated' - def __setattr__(self, name, value): - properties = {'conversation_id', 'system', 'metadata'} - if not hasattr(self, '_additionalProperties'): - super(Context, self).__setattr__('_additionalProperties', set()) - if name not in properties: - self._additionalProperties.add(name) - super(Context, self).__setattr__(name, value) - def __str__(self): - """Return a `str` version of this Context object.""" - return json.dumps(self._to_dict(), indent=2) +class ListExamplesEnums: + """ + Enums for list_examples parameters. + """ - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ + class Sort(str, Enum): + """ + The attribute by which returned examples will be sorted. To reverse the sort + order, prefix the value with a minus sign (`-`). + """ - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other + TEXT = 'text' + UPDATED = 'updated' -class Counterexample(object): +class ListCounterexamplesEnums: """ - Counterexample. - - :attr str text: The text of a user input marked as irrelevant input. This string must - conform to the following restrictions: - - It cannot contain carriage return, newline, or tab characters - - It cannot consist of only whitespace characters - - It must be no longer than 1024 characters. - :attr datetime created: (optional) The timestamp for creation of the object. - :attr datetime updated: (optional) The timestamp for the most recent update to the - object. + Enums for list_counterexamples parameters. """ - def __init__(self, text, created=None, updated=None): + class Sort(str, Enum): """ - Initialize a Counterexample object. - - :param str text: The text of a user input marked as irrelevant input. 
This string - must conform to the following restrictions: - - It cannot contain carriage return, newline, or tab characters - - It cannot consist of only whitespace characters - - It must be no longer than 1024 characters. - :param datetime created: (optional) The timestamp for creation of the object. - :param datetime updated: (optional) The timestamp for the most recent update to - the object. + The attribute by which returned counterexamples will be sorted. To reverse the + sort order, prefix the value with a minus sign (`-`). """ - self.text = text - self.created = created - self.updated = updated + + TEXT = 'text' + UPDATED = 'updated' + + +class ListEntitiesEnums: + """ + Enums for list_entities parameters. + """ + + class Sort(str, Enum): + """ + The attribute by which returned entities will be sorted. To reverse the sort + order, prefix the value with a minus sign (`-`). + """ + + ENTITY = 'entity' + UPDATED = 'updated' + + +class ListValuesEnums: + """ + Enums for list_values parameters. + """ + + class Sort(str, Enum): + """ + The attribute by which returned entity values will be sorted. To reverse the sort + order, prefix the value with a minus sign (`-`). + """ + + VALUE = 'value' + UPDATED = 'updated' + + +class ListSynonymsEnums: + """ + Enums for list_synonyms parameters. + """ + + class Sort(str, Enum): + """ + The attribute by which returned entity value synonyms will be sorted. To reverse + the sort order, prefix the value with a minus sign (`-`). + """ + + SYNONYM = 'synonym' + UPDATED = 'updated' + + +class ListDialogNodesEnums: + """ + Enums for list_dialog_nodes parameters. + """ + + class Sort(str, Enum): + """ + The attribute by which returned dialog nodes will be sorted. To reverse the sort + order, prefix the value with a minus sign (`-`). + """ + + DIALOG_NODE = 'dialog_node' + UPDATED = 'updated' + + +############################################################################## +# Models +############################################################################## + + +class AgentAvailabilityMessage: + """ + AgentAvailabilityMessage. + + :param str message: (optional) The text of the message. + """ + + def __init__( + self, + *, + message: Optional[str] = None, + ) -> None: + """ + Initialize a AgentAvailabilityMessage object. + + :param str message: (optional) The text of the message. 
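# --- Editor's illustrative sketch (not part of this diff) ---
# The regenerated models replace the old _from_dict/_to_dict helpers with
# public from_dict()/to_dict() methods, keeping the underscore versions as
# thin back-compat wrappers. A round trip with the AgentAvailabilityMessage
# model implemented just below:
from ibm_watson.assistant_v1 import AgentAvailabilityMessage

msg = AgentAvailabilityMessage.from_dict({'message': 'No agents are online.'})
print(msg.message)       # -> 'No agents are online.'
print(msg.to_dict())     # -> {'message': 'No agents are online.'}
assert msg == AgentAvailabilityMessage(message='No agents are online.')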
+ """ + self.message = message @classmethod - def _from_dict(cls, _dict): - """Initialize a Counterexample object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'AgentAvailabilityMessage': + """Initialize a AgentAvailabilityMessage object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - else: - raise ValueError( - 'Required property \'text\' not present in Counterexample JSON') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) + if (message := _dict.get('message')) is not None: + args['message'] = message return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a AgentAvailabilityMessage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) + if hasattr(self, 'message') and self.message is not None: + _dict['message'] = self.message return _dict - def __str__(self): - """Return a `str` version of this Counterexample object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this AgentAvailabilityMessage object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AgentAvailabilityMessage') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'AgentAvailabilityMessage') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class CounterexampleCollection(object): +class BulkClassifyOutput: """ - CounterexampleCollection. - - :attr list[Counterexample] counterexamples: An array of objects describing the - examples marked as irrelevant input. - :attr Pagination pagination: The pagination data for the returned objects. + BulkClassifyOutput. + + :param BulkClassifyUtterance input: (optional) The user input utterance to + classify. + :param List[RuntimeEntity] entities: (optional) An array of entities identified + in the utterance. + :param List[RuntimeIntent] intents: (optional) An array of intents recognized in + the utterance. """ - def __init__(self, counterexamples, pagination): - """ - Initialize a CounterexampleCollection object. - - :param list[Counterexample] counterexamples: An array of objects describing the - examples marked as irrelevant input. - :param Pagination pagination: The pagination data for the returned objects. + def __init__( + self, + *, + input: Optional['BulkClassifyUtterance'] = None, + entities: Optional[List['RuntimeEntity']] = None, + intents: Optional[List['RuntimeIntent']] = None, + ) -> None: + """ + Initialize a BulkClassifyOutput object. + + :param BulkClassifyUtterance input: (optional) The user input utterance to + classify. 
+ :param List[RuntimeEntity] entities: (optional) An array of entities + identified in the utterance. + :param List[RuntimeIntent] intents: (optional) An array of intents + recognized in the utterance. """ - self.counterexamples = counterexamples - self.pagination = pagination + self.input = input + self.entities = entities + self.intents = intents @classmethod - def _from_dict(cls, _dict): - """Initialize a CounterexampleCollection object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'BulkClassifyOutput': + """Initialize a BulkClassifyOutput object from a json dictionary.""" args = {} - if 'counterexamples' in _dict: - args['counterexamples'] = [ - Counterexample._from_dict(x) - for x in (_dict.get('counterexamples')) - ] - else: - raise ValueError( - 'Required property \'counterexamples\' not present in CounterexampleCollection JSON' - ) - if 'pagination' in _dict: - args['pagination'] = Pagination._from_dict(_dict.get('pagination')) - else: - raise ValueError( - 'Required property \'pagination\' not present in CounterexampleCollection JSON' - ) + if (input := _dict.get('input')) is not None: + args['input'] = BulkClassifyUtterance.from_dict(input) + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a BulkClassifyOutput object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, - 'counterexamples') and self.counterexamples is not None: - _dict['counterexamples'] = [ - x._to_dict() for x in self.counterexamples - ] - if hasattr(self, 'pagination') and self.pagination is not None: - _dict['pagination'] = self.pagination._to_dict() + if hasattr(self, 'input') and self.input is not None: + if isinstance(self.input, dict): + _dict['input'] = self.input + else: + _dict['input'] = self.input.to_dict() + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list return _dict - def __str__(self): - """Return a `str` version of this CounterexampleCollection object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this BulkClassifyOutput object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'BulkClassifyOutput') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'BulkClassifyOutput') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class CreateEntity(object): +class BulkClassifyResponse: + """ + BulkClassifyResponse. 
+ + :param List[BulkClassifyOutput] output: (optional) An array of objects that + contain classification information for the submitted input utterances. """ - CreateEntity. - :attr str entity: The name of the entity. This string must conform to the following - restrictions: - - It can contain only Unicode alphanumeric, underscore, and hyphen characters. - - It must be no longer than 64 characters. - If you specify an entity name beginning with the reserved prefix `sys-`, it must be - the name of a system entity that you want to enable. (Any entity content specified - with the request is ignored.). - :attr str description: (optional) The description of the entity. This string cannot - contain carriage return, newline, or tab characters, and it must be no longer than 128 - characters. - :attr dict metadata: (optional) Any metadata related to the entity. - :attr bool fuzzy_match: (optional) Whether to use fuzzy matching for the entity. - :attr datetime created: (optional) The timestamp for creation of the object. - :attr datetime updated: (optional) The timestamp for the most recent update to the - object. - :attr list[CreateValue] values: (optional) An array of objects describing the entity - values. - """ - - def __init__(self, - entity, - description=None, - metadata=None, - fuzzy_match=None, - created=None, - updated=None, - values=None): + def __init__( + self, + *, + output: Optional[List['BulkClassifyOutput']] = None, + ) -> None: """ - Initialize a CreateEntity object. + Initialize a BulkClassifyResponse object. - :param str entity: The name of the entity. This string must conform to the - following restrictions: - - It can contain only Unicode alphanumeric, underscore, and hyphen characters. - - It must be no longer than 64 characters. - If you specify an entity name beginning with the reserved prefix `sys-`, it must - be the name of a system entity that you want to enable. (Any entity content - specified with the request is ignored.). - :param str description: (optional) The description of the entity. This string - cannot contain carriage return, newline, or tab characters, and it must be no - longer than 128 characters. - :param dict metadata: (optional) Any metadata related to the entity. - :param bool fuzzy_match: (optional) Whether to use fuzzy matching for the entity. - :param datetime created: (optional) The timestamp for creation of the object. - :param datetime updated: (optional) The timestamp for the most recent update to - the object. - :param list[CreateValue] values: (optional) An array of objects describing the - entity values. + :param List[BulkClassifyOutput] output: (optional) An array of objects that + contain classification information for the submitted input utterances. 
""" - self.entity = entity - self.description = description - self.metadata = metadata - self.fuzzy_match = fuzzy_match - self.created = created - self.updated = updated - self.values = values + self.output = output @classmethod - def _from_dict(cls, _dict): - """Initialize a CreateEntity object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'BulkClassifyResponse': + """Initialize a BulkClassifyResponse object from a json dictionary.""" args = {} - if 'entity' in _dict: - args['entity'] = _dict.get('entity') - else: - raise ValueError( - 'Required property \'entity\' not present in CreateEntity JSON') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'metadata' in _dict: - args['metadata'] = _dict.get('metadata') - if 'fuzzy_match' in _dict: - args['fuzzy_match'] = _dict.get('fuzzy_match') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) - if 'values' in _dict: - args['values'] = [ - CreateValue._from_dict(x) for x in (_dict.get('values')) - ] + if (output := _dict.get('output')) is not None: + args['output'] = [BulkClassifyOutput.from_dict(v) for v in output] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a BulkClassifyResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'entity') and self.entity is not None: - _dict['entity'] = self.entity - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata - if hasattr(self, 'fuzzy_match') and self.fuzzy_match is not None: - _dict['fuzzy_match'] = self.fuzzy_match - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) - if hasattr(self, 'values') and self.values is not None: - _dict['values'] = [x._to_dict() for x in self.values] + if hasattr(self, 'output') and self.output is not None: + output_list = [] + for v in self.output: + if isinstance(v, dict): + output_list.append(v) + else: + output_list.append(v.to_dict()) + _dict['output'] = output_list return _dict - def __str__(self): - """Return a `str` version of this CreateEntity object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this BulkClassifyResponse object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'BulkClassifyResponse') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'BulkClassifyResponse') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class CreateIntent(object): +class BulkClassifyUtterance: + """ + The user input utterance to classify. + + :param str text: The text of the input utterance. """ - CreateIntent. - :attr str intent: The name of the intent. 
This string must conform to the following - restrictions: - - It can contain only Unicode alphanumeric, underscore, hyphen, and dot characters. - - It cannot begin with the reserved prefix `sys-`. - - It must be no longer than 128 characters. - :attr str description: (optional) The description of the intent. This string cannot - contain carriage return, newline, or tab characters, and it must be no longer than 128 - characters. - :attr datetime created: (optional) The timestamp for creation of the object. - :attr datetime updated: (optional) The timestamp for the most recent update to the - object. - :attr list[Example] examples: (optional) An array of user input examples for the - intent. - """ - - def __init__(self, - intent, - description=None, - created=None, - updated=None, - examples=None): + def __init__( + self, + text: str, + ) -> None: """ - Initialize a CreateIntent object. + Initialize a BulkClassifyUtterance object. - :param str intent: The name of the intent. This string must conform to the - following restrictions: - - It can contain only Unicode alphanumeric, underscore, hyphen, and dot - characters. - - It cannot begin with the reserved prefix `sys-`. - - It must be no longer than 128 characters. - :param str description: (optional) The description of the intent. This string - cannot contain carriage return, newline, or tab characters, and it must be no - longer than 128 characters. - :param datetime created: (optional) The timestamp for creation of the object. - :param datetime updated: (optional) The timestamp for the most recent update to - the object. - :param list[Example] examples: (optional) An array of user input examples for the - intent. + :param str text: The text of the input utterance. """ - self.intent = intent - self.description = description - self.created = created - self.updated = updated - self.examples = examples + self.text = text @classmethod - def _from_dict(cls, _dict): - """Initialize a CreateIntent object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'BulkClassifyUtterance': + """Initialize a BulkClassifyUtterance object from a json dictionary.""" args = {} - if 'intent' in _dict: - args['intent'] = _dict.get('intent') + if (text := _dict.get('text')) is not None: + args['text'] = text else: raise ValueError( - 'Required property \'intent\' not present in CreateIntent JSON') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) - if 'examples' in _dict: - args['examples'] = [ - Example._from_dict(x) for x in (_dict.get('examples')) - ] + 'Required property \'text\' not present in BulkClassifyUtterance JSON' + ) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a BulkClassifyUtterance object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'intent') and self.intent is not None: - _dict['intent'] = self.intent - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) - if hasattr(self, 
'examples') and self.examples is not None: - _dict['examples'] = [x._to_dict() for x in self.examples] + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text return _dict - def __str__(self): - """Return a `str` version of this CreateIntent object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this BulkClassifyUtterance object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'BulkClassifyUtterance') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'BulkClassifyUtterance') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class CreateValue(object): +class CaptureGroup: """ - CreateValue. + A recognized capture group for a pattern-based entity. - :attr str value: The text of the entity value. This string must conform to the - following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :attr dict metadata: (optional) Any metadata related to the entity value. - :attr str value_type: (optional) Specifies the type of entity value. - :attr list[str] synonyms: (optional) An array of synonyms for the entity value. A - value can specify either synonyms or patterns (depending on the value type), but not - both. A synonym must conform to the following resrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :attr list[str] patterns: (optional) An array of patterns for the entity value. A - value can specify either synonyms or patterns (depending on the value type), but not - both. A pattern is a regular expression no longer than 512 characters. For more - information about how to specify a pattern, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/entities.html#entities-create-dictionary-based). - :attr datetime created: (optional) The timestamp for creation of the object. - :attr datetime updated: (optional) The timestamp for the most recent update to the - object. - """ - - def __init__(self, - value, - metadata=None, - value_type=None, - synonyms=None, - patterns=None, - created=None, - updated=None): + :param str group: A recognized capture group for the entity. + :param List[int] location: (optional) Zero-based character offsets that indicate + where the entity value begins and ends in the input text. + """ + + def __init__( + self, + group: str, + *, + location: Optional[List[int]] = None, + ) -> None: """ - Initialize a CreateValue object. + Initialize a CaptureGroup object. - :param str value: The text of the entity value. This string must conform to the - following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :param dict metadata: (optional) Any metadata related to the entity value. - :param str value_type: (optional) Specifies the type of entity value. 
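`BulkClassifyUtterance.from_dict` illustrates the required-property validation used throughout these models: a missing key raises `ValueError` rather than producing a half-built object. A small sketch, assuming the `ibm_watson.assistant_v1` import path:

from ibm_watson.assistant_v1 import BulkClassifyUtterance

utterance = BulkClassifyUtterance(text='I want to cancel my order')
print(utterance.to_dict())   # {'text': 'I want to cancel my order'}

try:
    BulkClassifyUtterance.from_dict({})   # 'text' is required
except ValueError as err:
    print(err)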
- :param list[str] synonyms: (optional) An array of synonyms for the entity value. A - value can specify either synonyms or patterns (depending on the value type), but - not both. A synonym must conform to the following resrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :param list[str] patterns: (optional) An array of patterns for the entity value. A - value can specify either synonyms or patterns (depending on the value type), but - not both. A pattern is a regular expression no longer than 512 characters. For - more information about how to specify a pattern, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/entities.html#entities-create-dictionary-based). - :param datetime created: (optional) The timestamp for creation of the object. - :param datetime updated: (optional) The timestamp for the most recent update to - the object. + :param str group: A recognized capture group for the entity. + :param List[int] location: (optional) Zero-based character offsets that + indicate where the entity value begins and ends in the input text. """ - self.value = value - self.metadata = metadata - self.value_type = value_type - self.synonyms = synonyms - self.patterns = patterns - self.created = created - self.updated = updated + self.group = group + self.location = location @classmethod - def _from_dict(cls, _dict): - """Initialize a CreateValue object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'CaptureGroup': + """Initialize a CaptureGroup object from a json dictionary.""" args = {} - if 'value' in _dict: - args['value'] = _dict.get('value') + if (group := _dict.get('group')) is not None: + args['group'] = group else: raise ValueError( - 'Required property \'value\' not present in CreateValue JSON') - if 'metadata' in _dict: - args['metadata'] = _dict.get('metadata') - if 'type' in _dict or 'value_type' in _dict: - args['value_type'] = _dict.get('type') or _dict.get('value_type') - if 'synonyms' in _dict: - args['synonyms'] = _dict.get('synonyms') - if 'patterns' in _dict: - args['patterns'] = _dict.get('patterns') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) + 'Required property \'group\' not present in CaptureGroup JSON') + if (location := _dict.get('location')) is not None: + args['location'] = location return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a CaptureGroup object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'value') and self.value is not None: - _dict['value'] = self.value - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata - if hasattr(self, 'value_type') and self.value_type is not None: - _dict['type'] = self.value_type - if hasattr(self, 'synonyms') and self.synonyms is not None: - _dict['synonyms'] = self.synonyms - if hasattr(self, 'patterns') and self.patterns is not None: - _dict['patterns'] = self.patterns - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) + if hasattr(self, 'group') and 
self.group is not None: + _dict['group'] = self.group + if hasattr(self, 'location') and self.location is not None: + _dict['location'] = self.location return _dict - def __str__(self): - """Return a `str` version of this CreateValue object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this CaptureGroup object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CaptureGroup') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'CaptureGroup') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNode(object): +class ChannelTransferInfo: + """ + Information used by an integration to transfer the conversation to a different + channel. + + :param ChannelTransferTarget target: An object specifying target channels + available for the transfer. Each property of this object represents an available + transfer target. Currently, the only supported property is **chat**, + representing the web chat integration. """ - DialogNode. - :attr str dialog_node: The dialog node ID. This string must conform to the following - restrictions: - - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot - characters. - - It must be no longer than 1024 characters. - :attr str description: (optional) The description of the dialog node. This string - cannot contain carriage return, newline, or tab characters, and it must be no longer - than 128 characters. - :attr str conditions: (optional) The condition that will trigger the dialog node. This - string cannot contain carriage return, newline, or tab characters, and it must be no - longer than 2048 characters. - :attr str parent: (optional) The ID of the parent dialog node. This property is - omitted if the dialog node has no parent. - :attr str previous_sibling: (optional) The ID of the previous sibling dialog node. - This property is omitted if the dialog node has no previous sibling. - :attr DialogNodeOutput output: (optional) The output of the dialog node. For more - information about how to specify dialog node output, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses). - :attr dict context: (optional) The context for the dialog node. - :attr dict metadata: (optional) The metadata for the dialog node. - :attr DialogNodeNextStep next_step: (optional) The next step to execute following this - dialog node. - :attr str title: (optional) The alias used to identify the dialog node. This string - must conform to the following restrictions: - - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot - characters. - - It must be no longer than 64 characters. - :attr str node_type: (optional) How the dialog node is processed. - :attr str event_name: (optional) How an `event_handler` node is processed. - :attr str variable: (optional) The location in the dialog context where output is - stored. - :attr list[DialogNodeAction] actions: (optional) An array of objects describing any - actions to be invoked by the dialog node. - :attr str digress_in: (optional) Whether this top-level dialog node can be digressed - into. 
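A sketch of deserializing a `CaptureGroup`, where `location` holds the zero-based start and end character offsets of the matched span (the offsets below are illustrative):

from ibm_watson.assistant_v1 import CaptureGroup

group = CaptureGroup.from_dict({
    'group': 'group_0',
    'location': [17, 24],   # start/end character offsets in the input text
})
print(group.group, group.location)
# Omitting 'group' raises ValueError, since it is the one required property.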
- :attr str digress_out: (optional) Whether this dialog node can be returned to after a - digression. - :attr str digress_out_slots: (optional) Whether the user can digress to top-level - nodes while filling out slots. - :attr str user_label: (optional) A label that can be displayed externally to describe - the purpose of the node to users. This string must be no longer than 512 characters. - :attr bool disabled: (optional) For internal use only. - :attr datetime created: (optional) The timestamp for creation of the object. - :attr datetime updated: (optional) The timestamp for the most recent update to the - object. - """ - - def __init__(self, - dialog_node, - description=None, - conditions=None, - parent=None, - previous_sibling=None, - output=None, - context=None, - metadata=None, - next_step=None, - title=None, - node_type=None, - event_name=None, - variable=None, - actions=None, - digress_in=None, - digress_out=None, - digress_out_slots=None, - user_label=None, - disabled=None, - created=None, - updated=None): + def __init__( + self, + target: 'ChannelTransferTarget', + ) -> None: """ - Initialize a DialogNode object. + Initialize a ChannelTransferInfo object. - :param str dialog_node: The dialog node ID. This string must conform to the - following restrictions: - - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot - characters. - - It must be no longer than 1024 characters. - :param str description: (optional) The description of the dialog node. This string - cannot contain carriage return, newline, or tab characters, and it must be no - longer than 128 characters. - :param str conditions: (optional) The condition that will trigger the dialog node. - This string cannot contain carriage return, newline, or tab characters, and it - must be no longer than 2048 characters. - :param str parent: (optional) The ID of the parent dialog node. This property is - omitted if the dialog node has no parent. - :param str previous_sibling: (optional) The ID of the previous sibling dialog - node. This property is omitted if the dialog node has no previous sibling. - :param DialogNodeOutput output: (optional) The output of the dialog node. For more - information about how to specify dialog node output, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses). - :param dict context: (optional) The context for the dialog node. - :param dict metadata: (optional) The metadata for the dialog node. - :param DialogNodeNextStep next_step: (optional) The next step to execute following - this dialog node. - :param str title: (optional) The alias used to identify the dialog node. This - string must conform to the following restrictions: - - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot - characters. - - It must be no longer than 64 characters. - :param str node_type: (optional) How the dialog node is processed. - :param str event_name: (optional) How an `event_handler` node is processed. - :param str variable: (optional) The location in the dialog context where output is - stored. - :param list[DialogNodeAction] actions: (optional) An array of objects describing - any actions to be invoked by the dialog node. - :param str digress_in: (optional) Whether this top-level dialog node can be - digressed into. - :param str digress_out: (optional) Whether this dialog node can be returned to - after a digression. 
- :param str digress_out_slots: (optional) Whether the user can digress to top-level - nodes while filling out slots. - :param str user_label: (optional) A label that can be displayed externally to - describe the purpose of the node to users. This string must be no longer than 512 - characters. - :param bool disabled: (optional) For internal use only. - :param datetime created: (optional) The timestamp for creation of the object. - :param datetime updated: (optional) The timestamp for the most recent update to - the object. + :param ChannelTransferTarget target: An object specifying target channels + available for the transfer. Each property of this object represents an + available transfer target. Currently, the only supported property is + **chat**, representing the web chat integration. """ - self.dialog_node = dialog_node - self.description = description - self.conditions = conditions - self.parent = parent - self.previous_sibling = previous_sibling - self.output = output - self.context = context - self.metadata = metadata - self.next_step = next_step - self.title = title - self.node_type = node_type - self.event_name = event_name - self.variable = variable - self.actions = actions - self.digress_in = digress_in - self.digress_out = digress_out - self.digress_out_slots = digress_out_slots - self.user_label = user_label - self.disabled = disabled - self.created = created - self.updated = updated + self.target = target @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNode object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'ChannelTransferInfo': + """Initialize a ChannelTransferInfo object from a json dictionary.""" args = {} - if 'dialog_node' in _dict: - args['dialog_node'] = _dict.get('dialog_node') + if (target := _dict.get('target')) is not None: + args['target'] = ChannelTransferTarget.from_dict(target) else: raise ValueError( - 'Required property \'dialog_node\' not present in DialogNode JSON' + 'Required property \'target\' not present in ChannelTransferInfo JSON' ) - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'conditions' in _dict: - args['conditions'] = _dict.get('conditions') - if 'parent' in _dict: - args['parent'] = _dict.get('parent') - if 'previous_sibling' in _dict: - args['previous_sibling'] = _dict.get('previous_sibling') - if 'output' in _dict: - args['output'] = DialogNodeOutput._from_dict(_dict.get('output')) - if 'context' in _dict: - args['context'] = _dict.get('context') - if 'metadata' in _dict: - args['metadata'] = _dict.get('metadata') - if 'next_step' in _dict: - args['next_step'] = DialogNodeNextStep._from_dict( - _dict.get('next_step')) - if 'title' in _dict: - args['title'] = _dict.get('title') - if 'type' in _dict or 'node_type' in _dict: - args['node_type'] = _dict.get('type') or _dict.get('node_type') - if 'event_name' in _dict: - args['event_name'] = _dict.get('event_name') - if 'variable' in _dict: - args['variable'] = _dict.get('variable') - if 'actions' in _dict: - args['actions'] = [ - DialogNodeAction._from_dict(x) for x in (_dict.get('actions')) - ] - if 'digress_in' in _dict: - args['digress_in'] = _dict.get('digress_in') - if 'digress_out' in _dict: - args['digress_out'] = _dict.get('digress_out') - if 'digress_out_slots' in _dict: - args['digress_out_slots'] = _dict.get('digress_out_slots') - if 'user_label' in _dict: - args['user_label'] = _dict.get('user_label') - if 'disabled' in _dict: - args['disabled'] = _dict.get('disabled') - if 'created' in _dict: - 
args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a ChannelTransferInfo object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'dialog_node') and self.dialog_node is not None: - _dict['dialog_node'] = self.dialog_node - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'conditions') and self.conditions is not None: - _dict['conditions'] = self.conditions - if hasattr(self, 'parent') and self.parent is not None: - _dict['parent'] = self.parent - if hasattr(self, - 'previous_sibling') and self.previous_sibling is not None: - _dict['previous_sibling'] = self.previous_sibling - if hasattr(self, 'output') and self.output is not None: - _dict['output'] = self.output._to_dict() - if hasattr(self, 'context') and self.context is not None: - _dict['context'] = self.context - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata - if hasattr(self, 'next_step') and self.next_step is not None: - _dict['next_step'] = self.next_step._to_dict() - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, 'node_type') and self.node_type is not None: - _dict['type'] = self.node_type - if hasattr(self, 'event_name') and self.event_name is not None: - _dict['event_name'] = self.event_name - if hasattr(self, 'variable') and self.variable is not None: - _dict['variable'] = self.variable - if hasattr(self, 'actions') and self.actions is not None: - _dict['actions'] = [x._to_dict() for x in self.actions] - if hasattr(self, 'digress_in') and self.digress_in is not None: - _dict['digress_in'] = self.digress_in - if hasattr(self, 'digress_out') and self.digress_out is not None: - _dict['digress_out'] = self.digress_out - if hasattr(self, - 'digress_out_slots') and self.digress_out_slots is not None: - _dict['digress_out_slots'] = self.digress_out_slots - if hasattr(self, 'user_label') and self.user_label is not None: - _dict['user_label'] = self.user_label - if hasattr(self, 'disabled') and self.disabled is not None: - _dict['disabled'] = self.disabled - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) + if hasattr(self, 'target') and self.target is not None: + if isinstance(self.target, dict): + _dict['target'] = self.target + else: + _dict['target'] = self.target.to_dict() return _dict - def __str__(self): - """Return a `str` version of this DialogNode object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ChannelTransferInfo object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'ChannelTransferInfo') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 
'ChannelTransferInfo') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodeAction(object): +class ChannelTransferTarget: """ - DialogNodeAction. + An object specifying target channels available for the transfer. Each property of this + object represents an available transfer target. Currently, the only supported property + is **chat**, representing the web chat integration. - :attr str name: The name of the action. - :attr str action_type: (optional) The type of action to invoke. - :attr dict parameters: (optional) A map of key/value pairs to be provided to the - action. - :attr str result_variable: The location in the dialog context where the result of the - action is stored. - :attr str credentials: (optional) The name of the context variable that the client - application will use to pass in credentials for the action. + :param ChannelTransferTargetChat chat: (optional) Information for transferring + to the web chat integration. """ - def __init__(self, - name, - result_variable, - action_type=None, - parameters=None, - credentials=None): + def __init__( + self, + *, + chat: Optional['ChannelTransferTargetChat'] = None, + ) -> None: """ - Initialize a DialogNodeAction object. + Initialize a ChannelTransferTarget object. - :param str name: The name of the action. - :param str result_variable: The location in the dialog context where the result of - the action is stored. - :param str action_type: (optional) The type of action to invoke. - :param dict parameters: (optional) A map of key/value pairs to be provided to the - action. - :param str credentials: (optional) The name of the context variable that the - client application will use to pass in credentials for the action. + :param ChannelTransferTargetChat chat: (optional) Information for + transferring to the web chat integration. 
""" - self.name = name - self.action_type = action_type - self.parameters = parameters - self.result_variable = result_variable - self.credentials = credentials + self.chat = chat @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeAction object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'ChannelTransferTarget': + """Initialize a ChannelTransferTarget object from a json dictionary.""" args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in DialogNodeAction JSON' - ) - if 'type' in _dict or 'action_type' in _dict: - args['action_type'] = _dict.get('type') or _dict.get('action_type') - if 'parameters' in _dict: - args['parameters'] = _dict.get('parameters') - if 'result_variable' in _dict: - args['result_variable'] = _dict.get('result_variable') - else: - raise ValueError( - 'Required property \'result_variable\' not present in DialogNodeAction JSON' - ) - if 'credentials' in _dict: - args['credentials'] = _dict.get('credentials') + if (chat := _dict.get('chat')) is not None: + args['chat'] = ChannelTransferTargetChat.from_dict(chat) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a ChannelTransferTarget object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'action_type') and self.action_type is not None: - _dict['type'] = self.action_type - if hasattr(self, 'parameters') and self.parameters is not None: - _dict['parameters'] = self.parameters - if hasattr(self, - 'result_variable') and self.result_variable is not None: - _dict['result_variable'] = self.result_variable - if hasattr(self, 'credentials') and self.credentials is not None: - _dict['credentials'] = self.credentials + if hasattr(self, 'chat') and self.chat is not None: + if isinstance(self.chat, dict): + _dict['chat'] = self.chat + else: + _dict['chat'] = self.chat.to_dict() return _dict - def __str__(self): - """Return a `str` version of this DialogNodeAction object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this ChannelTransferTarget object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ChannelTransferTarget') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'ChannelTransferTarget') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodeCollection(object): +class ChannelTransferTargetChat: """ - An array of dialog nodes. + Information for transferring to the web chat integration. - :attr list[DialogNode] dialog_nodes: An array of objects describing the dialog nodes - defined for the workspace. - :attr Pagination pagination: The pagination data for the returned objects. + :param str url: (optional) The URL of the target web chat. 
""" - def __init__(self, dialog_nodes, pagination): + def __init__( + self, + *, + url: Optional[str] = None, + ) -> None: """ - Initialize a DialogNodeCollection object. + Initialize a ChannelTransferTargetChat object. - :param list[DialogNode] dialog_nodes: An array of objects describing the dialog - nodes defined for the workspace. - :param Pagination pagination: The pagination data for the returned objects. + :param str url: (optional) The URL of the target web chat. """ - self.dialog_nodes = dialog_nodes - self.pagination = pagination + self.url = url @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeCollection object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'ChannelTransferTargetChat': + """Initialize a ChannelTransferTargetChat object from a json dictionary.""" args = {} - if 'dialog_nodes' in _dict: - args['dialog_nodes'] = [ - DialogNode._from_dict(x) for x in (_dict.get('dialog_nodes')) - ] - else: - raise ValueError( - 'Required property \'dialog_nodes\' not present in DialogNodeCollection JSON' - ) - if 'pagination' in _dict: - args['pagination'] = Pagination._from_dict(_dict.get('pagination')) - else: - raise ValueError( - 'Required property \'pagination\' not present in DialogNodeCollection JSON' - ) + if (url := _dict.get('url')) is not None: + args['url'] = url return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a ChannelTransferTargetChat object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'dialog_nodes') and self.dialog_nodes is not None: - _dict['dialog_nodes'] = [x._to_dict() for x in self.dialog_nodes] - if hasattr(self, 'pagination') and self.pagination is not None: - _dict['pagination'] = self.pagination._to_dict() + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url return _dict - def __str__(self): - """Return a `str` version of this DialogNodeCollection object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this ChannelTransferTargetChat object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ChannelTransferTargetChat') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'ChannelTransferTargetChat') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodeNextStep(object): +class Context: """ - The next step to execute following this dialog node. + State information for the conversation. To maintain state, include the context from + the previous response. + + :param str conversation_id: (optional) The unique identifier of the + conversation. The conversation ID cannot contain any of the following + characters: `+` `=` `&&` `||` `>` `<` `!` `(` `)` `{` `}` `[` `]` `^` `"` `~` + `*` `?` `:` `\` `/`. + :param dict system: (optional) For internal use only. + :param MessageContextMetadata metadata: (optional) Metadata related to the + message. + + This type supports additional properties of type object. Any context variable. 
+ """ + + # The set of defined properties for the class + _properties = frozenset(['conversation_id', 'system', 'metadata']) - :attr str behavior: What happens after the dialog node completes. The valid values - depend on the node type: - - The following values are valid for any node: - - `get_user_input` - - `skip_user_input` - - `jump_to` - - If the node is of type `event_handler` and its parent node is of type `slot` or - `frame`, additional values are also valid: - - if **event_name**=`filled` and the type of the parent node is `slot`: - - `reprompt` - - `skip_all_slots` - - if **event_name**=`nomatch` and the type of the parent node is `slot`: - - `reprompt` - - `skip_slot` - - `skip_all_slots` - - if **event_name**=`generic` and the type of the parent node is `frame`: - - `reprompt` - - `skip_slot` - - `skip_all_slots` - If you specify `jump_to`, then you must also specify a value for the `dialog_node` - property. - :attr str dialog_node: (optional) The ID of the dialog node to process next. This - parameter is required if **behavior**=`jump_to`. - :attr str selector: (optional) Which part of the dialog node to process next. - """ - - def __init__(self, behavior, dialog_node=None, selector=None): + def __init__( + self, + *, + conversation_id: Optional[str] = None, + system: Optional[dict] = None, + metadata: Optional['MessageContextMetadata'] = None, + **kwargs: Optional[object], + ) -> None: """ - Initialize a DialogNodeNextStep object. + Initialize a Context object. - :param str behavior: What happens after the dialog node completes. The valid - values depend on the node type: - - The following values are valid for any node: - - `get_user_input` - - `skip_user_input` - - `jump_to` - - If the node is of type `event_handler` and its parent node is of type `slot` or - `frame`, additional values are also valid: - - if **event_name**=`filled` and the type of the parent node is `slot`: - - `reprompt` - - `skip_all_slots` - - if **event_name**=`nomatch` and the type of the parent node is `slot`: - - `reprompt` - - `skip_slot` - - `skip_all_slots` - - if **event_name**=`generic` and the type of the parent node is `frame`: - - `reprompt` - - `skip_slot` - - `skip_all_slots` - If you specify `jump_to`, then you must also specify a value for the `dialog_node` - property. - :param str dialog_node: (optional) The ID of the dialog node to process next. This - parameter is required if **behavior**=`jump_to`. - :param str selector: (optional) Which part of the dialog node to process next. + :param str conversation_id: (optional) The unique identifier of the + conversation. The conversation ID cannot contain any of the following + characters: `+` `=` `&&` `||` `>` `<` `!` `(` `)` `{` `}` `[` `]` `^` `"` + `~` `*` `?` `:` `\` `/`. + :param dict system: (optional) For internal use only. + :param MessageContextMetadata metadata: (optional) Metadata related to the + message. + :param object **kwargs: (optional) Any context variable. """ - self.behavior = behavior - self.dialog_node = dialog_node - self.selector = selector + self.conversation_id = conversation_id + self.system = system + self.metadata = metadata + for k, v in kwargs.items(): + if k not in Context._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. 
+ format(k)) @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeNextStep object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'Context': + """Initialize a Context object from a json dictionary.""" args = {} - if 'behavior' in _dict: - args['behavior'] = _dict.get('behavior') - else: - raise ValueError( - 'Required property \'behavior\' not present in DialogNodeNextStep JSON' - ) - if 'dialog_node' in _dict: - args['dialog_node'] = _dict.get('dialog_node') - if 'selector' in _dict: - args['selector'] = _dict.get('selector') + if (conversation_id := _dict.get('conversation_id')) is not None: + args['conversation_id'] = conversation_id + if (system := _dict.get('system')) is not None: + args['system'] = system + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = MessageContextMetadata.from_dict(metadata) + for k, v in _dict.items(): + if k not in cls._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + args[k] = v return cls(**args) + @classmethod + def _from_dict(cls, _dict): + """Initialize a Context object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'conversation_id') and self.conversation_id is not None: + _dict['conversation_id'] = self.conversation_id + if hasattr(self, 'system') and self.system is not None: + _dict['system'] = self.system + if hasattr(self, 'metadata') and self.metadata is not None: + if isinstance(self.metadata, dict): + _dict['metadata'] = self.metadata + else: + _dict['metadata'] = self.metadata.to_dict() + for k in [ + _k for _k in vars(self).keys() if _k not in Context._properties + ]: + _dict[k] = getattr(self, k) + return _dict + def _to_dict(self): """Return a json dictionary representing this model.""" + return self.to_dict() + + def get_properties(self) -> Dict: + """Return the additional properties from this instance of Context in the form of a dict.""" _dict = {} - if hasattr(self, 'behavior') and self.behavior is not None: - _dict['behavior'] = self.behavior - if hasattr(self, 'dialog_node') and self.dialog_node is not None: - _dict['dialog_node'] = self.dialog_node - if hasattr(self, 'selector') and self.selector is not None: - _dict['selector'] = self.selector + for k in [ + _k for _k in vars(self).keys() if _k not in Context._properties + ]: + _dict[k] = getattr(self, k) return _dict - def __str__(self): - """Return a `str` version of this DialogNodeNextStep object.""" - return json.dumps(self._to_dict(), indent=2) + def set_properties(self, _dict: dict): + """Set a dictionary of additional properties in this instance of Context""" + for k in [ + _k for _k in vars(self).keys() if _k not in Context._properties + ]: + delattr(self, k) + for k, v in _dict.items(): + if k not in Context._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. 
+ format(k)) + + def __str__(self) -> str: + """Return a `str` version of this Context object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Context') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Context') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodeOutput(object): +class Counterexample: """ - The output of the dialog node. For more information about how to specify dialog node - output, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses). + Counterexample. - :attr list[DialogNodeOutputGeneric] generic: (optional) An array of objects describing - the output defined for the dialog node. - :attr DialogNodeOutputModifiers modifiers: (optional) Options that modify how - specified output is handled. + :param str text: The text of a user input marked as irrelevant input. This + string must conform to the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. """ - def __init__(self, generic=None, modifiers=None, **kwargs): + def __init__( + self, + text: str, + *, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: """ - Initialize a DialogNodeOutput object. + Initialize a Counterexample object. - :param list[DialogNodeOutputGeneric] generic: (optional) An array of objects - describing the output defined for the dialog node. - :param DialogNodeOutputModifiers modifiers: (optional) Options that modify how - specified output is handled. - :param **kwargs: (optional) Any additional properties. + :param str text: The text of a user input marked as irrelevant input. This + string must conform to the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. 
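`Context` accepts arbitrary context variables on top of its declared properties; they are stored as instance attributes and round-tripped by `to_dict`, while `get_properties`/`set_properties` manage only the additional ones. A sketch with made-up variable names:

from ibm_watson.assistant_v1 import Context

ctx = Context(conversation_id='abc-123', customer_tier='gold', cart_items=3)
print(ctx.get_properties())   # {'customer_tier': 'gold', 'cart_items': 3}
print(ctx.to_dict())          # declared + additional properties

ctx.set_properties({'customer_tier': 'silver'})   # replaces the additional properties
print(ctx.get_properties())   # {'customer_tier': 'silver'}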
""" - self.generic = generic - self.modifiers = modifiers - for _key, _value in kwargs.items(): - setattr(self, _key, _value) + self.text = text + self.created = created + self.updated = updated @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeOutput object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'Counterexample': + """Initialize a Counterexample object from a json dictionary.""" args = {} - xtra = _dict.copy() - if 'generic' in _dict: - args['generic'] = [ - DialogNodeOutputGeneric._from_dict(x) - for x in (_dict.get('generic')) - ] - del xtra['generic'] - if 'modifiers' in _dict: - args['modifiers'] = DialogNodeOutputModifiers._from_dict( - _dict.get('modifiers')) - del xtra['modifiers'] - args.update(xtra) + if (text := _dict.get('text')) is not None: + args['text'] = text + else: + raise ValueError( + 'Required property \'text\' not present in Counterexample JSON') + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Counterexample object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'generic') and self.generic is not None: - _dict['generic'] = [x._to_dict() for x in self.generic] - if hasattr(self, 'modifiers') and self.modifiers is not None: - _dict['modifiers'] = self.modifiers._to_dict() - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) return _dict - def __setattr__(self, name, value): - properties = {'generic', 'modifiers'} - if not hasattr(self, '_additionalProperties'): - super(DialogNodeOutput, self).__setattr__('_additionalProperties', - set()) - if name not in properties: - self._additionalProperties.add(name) - super(DialogNodeOutput, self).__setattr__(name, value) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __str__(self): - """Return a `str` version of this DialogNodeOutput object.""" - return json.dumps(self._to_dict(), indent=2) + def __str__(self) -> str: + """Return a `str` version of this Counterexample object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Counterexample') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Counterexample') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodeOutputGeneric(object): +class CounterexampleCollection: + """ + CounterexampleCollection. + + :param List[Counterexample] counterexamples: An array of objects describing the + examples marked as irrelevant input. 
+ :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). """ - DialogNodeOutputGeneric. - :attr str response_type: The type of response returned by the dialog node. The - specified response type must be supported by the client application or channel. - :attr list[DialogNodeOutputTextValuesElement] values: (optional) A list of one or more - objects defining text responses. Required when **response_type**=`text`. - :attr str selection_policy: (optional) How a response is selected from the list, if - more than one response is specified. Valid only when **response_type**=`text`. - :attr str delimiter: (optional) The delimiter to use as a separator between responses - when `selection_policy`=`multiline`. - :attr int time: (optional) How long to pause, in milliseconds. The valid values are - from 0 to 10000. Valid only when **response_type**=`pause`. - :attr bool typing: (optional) Whether to send a "user is typing" event during the - pause. Ignored if the channel does not support this event. Valid only when - **response_type**=`pause`. - :attr str source: (optional) The URL of the image. Required when - **response_type**=`image`. - :attr str title: (optional) An optional title to show before the response. Valid only - when **response_type**=`image` or `option`. This string must be no longer than 512 - characters. - :attr str description: (optional) An optional description to show with the response. - Valid only when **response_type**=`image` or `option`. This string must be no longer - than 256 characters. - :attr str preference: (optional) The preferred type of control to display, if - supported by the channel. Valid only when **response_type**=`option`. - :attr list[DialogNodeOutputOptionsElement] options: (optional) An array of objects - describing the options from which the user can choose. You can include up to 20 - options. Required when **response_type**=`option`. - :attr str message_to_human_agent: (optional) An optional message to be sent to the - human agent who will be taking over the conversation. Valid only when - **reponse_type**=`connect_to_agent`. This string must be no longer than 256 - characters. - """ - - def __init__(self, - response_type, - values=None, - selection_policy=None, - delimiter=None, - time=None, - typing=None, - source=None, - title=None, - description=None, - preference=None, - options=None, - message_to_human_agent=None): + def __init__( + self, + counterexamples: List['Counterexample'], + pagination: 'Pagination', + ) -> None: """ - Initialize a DialogNodeOutputGeneric object. + Initialize a CounterexampleCollection object. - :param str response_type: The type of response returned by the dialog node. The - specified response type must be supported by the client application or channel. - :param list[DialogNodeOutputTextValuesElement] values: (optional) A list of one or - more objects defining text responses. Required when **response_type**=`text`. - :param str selection_policy: (optional) How a response is selected from the list, - if more than one response is specified. Valid only when **response_type**=`text`. - :param str delimiter: (optional) The delimiter to use as a separator between - responses when `selection_policy`=`multiline`. - :param int time: (optional) How long to pause, in milliseconds. The valid values - are from 0 to 10000. Valid only when **response_type**=`pause`. 
- :param bool typing: (optional) Whether to send a "user is typing" event during the - pause. Ignored if the channel does not support this event. Valid only when - **response_type**=`pause`. - :param str source: (optional) The URL of the image. Required when - **response_type**=`image`. - :param str title: (optional) An optional title to show before the response. Valid - only when **response_type**=`image` or `option`. This string must be no longer - than 512 characters. - :param str description: (optional) An optional description to show with the - response. Valid only when **response_type**=`image` or `option`. This string must - be no longer than 256 characters. - :param str preference: (optional) The preferred type of control to display, if - supported by the channel. Valid only when **response_type**=`option`. - :param list[DialogNodeOutputOptionsElement] options: (optional) An array of - objects describing the options from which the user can choose. You can include up - to 20 options. Required when **response_type**=`option`. - :param str message_to_human_agent: (optional) An optional message to be sent to - the human agent who will be taking over the conversation. Valid only when - **reponse_type**=`connect_to_agent`. This string must be no longer than 256 - characters. + :param List[Counterexample] counterexamples: An array of objects describing + the examples marked as irrelevant input. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). """ - self.response_type = response_type - self.values = values - self.selection_policy = selection_policy - self.delimiter = delimiter - self.time = time - self.typing = typing - self.source = source - self.title = title - self.description = description - self.preference = preference - self.options = options - self.message_to_human_agent = message_to_human_agent + self.counterexamples = counterexamples + self.pagination = pagination @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeOutputGeneric object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'CounterexampleCollection': + """Initialize a CounterexampleCollection object from a json dictionary.""" args = {} - if 'response_type' in _dict: - args['response_type'] = _dict.get('response_type') + if (counterexamples := _dict.get('counterexamples')) is not None: + args['counterexamples'] = [ + Counterexample.from_dict(v) for v in counterexamples + ] else: raise ValueError( - 'Required property \'response_type\' not present in DialogNodeOutputGeneric JSON' + 'Required property \'counterexamples\' not present in CounterexampleCollection JSON' + ) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in CounterexampleCollection JSON' ) - if 'values' in _dict: - args['values'] = [ - DialogNodeOutputTextValuesElement._from_dict(x) - for x in (_dict.get('values')) - ] - if 'selection_policy' in _dict: - args['selection_policy'] = _dict.get('selection_policy') - if 'delimiter' in _dict: - args['delimiter'] = _dict.get('delimiter') - if 'time' in _dict: - args['time'] = _dict.get('time') - if 'typing' in _dict: - args['typing'] = _dict.get('typing') - if 'source' in _dict: - args['source'] = _dict.get('source') - if 'title' in _dict: - args['title'] = _dict.get('title') - if 'description' in _dict: - args['description'] = 
_dict.get('description') - if 'preference' in _dict: - args['preference'] = _dict.get('preference') - if 'options' in _dict: - args['options'] = [ - DialogNodeOutputOptionsElement._from_dict(x) - for x in (_dict.get('options')) - ] - if 'message_to_human_agent' in _dict: - args['message_to_human_agent'] = _dict.get('message_to_human_agent') return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a CounterexampleCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'response_type') and self.response_type is not None: - _dict['response_type'] = self.response_type - if hasattr(self, 'values') and self.values is not None: - _dict['values'] = [x._to_dict() for x in self.values] if hasattr(self, - 'selection_policy') and self.selection_policy is not None: - _dict['selection_policy'] = self.selection_policy - if hasattr(self, 'delimiter') and self.delimiter is not None: - _dict['delimiter'] = self.delimiter - if hasattr(self, 'time') and self.time is not None: - _dict['time'] = self.time - if hasattr(self, 'typing') and self.typing is not None: - _dict['typing'] = self.typing - if hasattr(self, 'source') and self.source is not None: - _dict['source'] = self.source - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'preference') and self.preference is not None: - _dict['preference'] = self.preference - if hasattr(self, 'options') and self.options is not None: - _dict['options'] = [x._to_dict() for x in self.options] - if hasattr(self, 'message_to_human_agent' - ) and self.message_to_human_agent is not None: - _dict['message_to_human_agent'] = self.message_to_human_agent + 'counterexamples') and self.counterexamples is not None: + counterexamples_list = [] + for v in self.counterexamples: + if isinstance(v, dict): + counterexamples_list.append(v) + else: + counterexamples_list.append(v.to_dict()) + _dict['counterexamples'] = counterexamples_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() return _dict - def __str__(self): - """Return a `str` version of this DialogNodeOutputGeneric object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CounterexampleCollection object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'CounterexampleCollection') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'CounterexampleCollection') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodeOutputModifiers(object): +class CreateEntity: """ - Options that modify how specified output is handled. + CreateEntity. 
- :attr bool overwrite: (optional) Whether values in the output will overwrite output - values in an array specified by previously executed dialog nodes. If this option is - set to **false**, new values will be appended to previously specified values. + :param str entity: The name of the entity. This string must conform to the + following restrictions: + - It can contain only Unicode alphanumeric, underscore, and hyphen characters. + - If you specify an entity name beginning with the reserved prefix `sys-`, it + must be the name of a system entity that you want to enable. (Any entity content + specified with the request is ignored.). + :param str description: (optional) The description of the entity. This string + cannot contain carriage return, newline, or tab characters. + :param dict metadata: (optional) Any metadata related to the entity. + :param bool fuzzy_match: (optional) Whether to use fuzzy matching for the + entity. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + :param List[CreateValue] values: (optional) An array of objects describing the + entity values. """ - def __init__(self, overwrite=None): + def __init__( + self, + entity: str, + *, + description: Optional[str] = None, + metadata: Optional[dict] = None, + fuzzy_match: Optional[bool] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + values: Optional[List['CreateValue']] = None, + ) -> None: """ - Initialize a DialogNodeOutputModifiers object. + Initialize a CreateEntity object. - :param bool overwrite: (optional) Whether values in the output will overwrite - output values in an array specified by previously executed dialog nodes. If this - option is set to **false**, new values will be appended to previously specified - values. + :param str entity: The name of the entity. This string must conform to the + following restrictions: + - It can contain only Unicode alphanumeric, underscore, and hyphen + characters. + - If you specify an entity name beginning with the reserved prefix `sys-`, + it must be the name of a system entity that you want to enable. (Any entity + content specified with the request is ignored.). + :param str description: (optional) The description of the entity. This + string cannot contain carriage return, newline, or tab characters. + :param dict metadata: (optional) Any metadata related to the entity. + :param bool fuzzy_match: (optional) Whether to use fuzzy matching for the + entity. + :param List[CreateValue] values: (optional) An array of objects describing + the entity values. 
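+
+        Example (illustrative sketch; the entity name, description, and values
+        are made up, and `CreateValue` is defined later in this module)::
+
+            entity = CreateEntity(
+                entity='beverage',
+                description='Hot drinks a customer can order',
+                fuzzy_match=True,
+                values=[CreateValue(value='coffee',
+                                    synonyms=['espresso', 'latte'])],
+            )
+            entity_json = entity.to_dict()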
""" - self.overwrite = overwrite + self.entity = entity + self.description = description + self.metadata = metadata + self.fuzzy_match = fuzzy_match + self.created = created + self.updated = updated + self.values = values @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeOutputModifiers object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'CreateEntity': + """Initialize a CreateEntity object from a json dictionary.""" args = {} - if 'overwrite' in _dict: - args['overwrite'] = _dict.get('overwrite') + if (entity := _dict.get('entity')) is not None: + args['entity'] = entity + else: + raise ValueError( + 'Required property \'entity\' not present in CreateEntity JSON') + if (description := _dict.get('description')) is not None: + args['description'] = description + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = metadata + if (fuzzy_match := _dict.get('fuzzy_match')) is not None: + args['fuzzy_match'] = fuzzy_match + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + if (values := _dict.get('values')) is not None: + args['values'] = [CreateValue.from_dict(v) for v in values] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a CreateEntity object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'overwrite') and self.overwrite is not None: - _dict['overwrite'] = self.overwrite + if hasattr(self, 'entity') and self.entity is not None: + _dict['entity'] = self.entity + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'metadata') and self.metadata is not None: + _dict['metadata'] = self.metadata + if hasattr(self, 'fuzzy_match') and self.fuzzy_match is not None: + _dict['fuzzy_match'] = self.fuzzy_match + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + if hasattr(self, 'values') and self.values is not None: + values_list = [] + for v in self.values: + if isinstance(v, dict): + values_list.append(v) + else: + values_list.append(v.to_dict()) + _dict['values'] = values_list return _dict - def __str__(self): - """Return a `str` version of this DialogNodeOutputModifiers object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CreateEntity object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'CreateEntity') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'CreateEntity') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodeOutputOptionsElement(object): +class CreateIntent: """ - DialogNodeOutputOptionsElement. + CreateIntent. 
- :attr str label: The user-facing label for the option. - :attr DialogNodeOutputOptionsElementValue value: An object defining the message input - to be sent to the Watson Assistant service if the user selects the corresponding - option. + :param str intent: The name of the intent. This string must conform to the + following restrictions: + - It can contain only Unicode alphanumeric, underscore, hyphen, and dot + characters. + - It cannot begin with the reserved prefix `sys-`. + :param str description: (optional) The description of the intent. This string + cannot contain carriage return, newline, or tab characters. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + :param List[Example] examples: (optional) An array of user input examples for + the intent. """ - def __init__(self, label, value): + def __init__( + self, + intent: str, + *, + description: Optional[str] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + examples: Optional[List['Example']] = None, + ) -> None: """ - Initialize a DialogNodeOutputOptionsElement object. + Initialize a CreateIntent object. - :param str label: The user-facing label for the option. - :param DialogNodeOutputOptionsElementValue value: An object defining the message - input to be sent to the Watson Assistant service if the user selects the - corresponding option. + :param str intent: The name of the intent. This string must conform to the + following restrictions: + - It can contain only Unicode alphanumeric, underscore, hyphen, and dot + characters. + - It cannot begin with the reserved prefix `sys-`. + :param str description: (optional) The description of the intent. This + string cannot contain carriage return, newline, or tab characters. + :param List[Example] examples: (optional) An array of user input examples + for the intent. 
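+
+        Example (illustrative sketch; the intent name and description are made
+        up). It also shows the `from_dict`/`to_dict` round trip shared by the
+        models in this module::
+
+            intent = CreateIntent(intent='greetings',
+                                  description='User says hello')
+            restored = CreateIntent.from_dict(intent.to_dict())
+            assert restored == intent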
""" - self.label = label - self.value = value + self.intent = intent + self.description = description + self.created = created + self.updated = updated + self.examples = examples @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeOutputOptionsElement object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'CreateIntent': + """Initialize a CreateIntent object from a json dictionary.""" args = {} - if 'label' in _dict: - args['label'] = _dict.get('label') - else: - raise ValueError( - 'Required property \'label\' not present in DialogNodeOutputOptionsElement JSON' - ) - if 'value' in _dict: - args['value'] = DialogNodeOutputOptionsElementValue._from_dict( - _dict.get('value')) + if (intent := _dict.get('intent')) is not None: + args['intent'] = intent else: raise ValueError( - 'Required property \'value\' not present in DialogNodeOutputOptionsElement JSON' - ) + 'Required property \'intent\' not present in CreateIntent JSON') + if (description := _dict.get('description')) is not None: + args['description'] = description + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + if (examples := _dict.get('examples')) is not None: + args['examples'] = [Example.from_dict(v) for v in examples] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a CreateIntent object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'label') and self.label is not None: - _dict['label'] = self.label - if hasattr(self, 'value') and self.value is not None: - _dict['value'] = self.value._to_dict() + if hasattr(self, 'intent') and self.intent is not None: + _dict['intent'] = self.intent + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + if hasattr(self, 'examples') and self.examples is not None: + examples_list = [] + for v in self.examples: + if isinstance(v, dict): + examples_list.append(v) + else: + examples_list.append(v.to_dict()) + _dict['examples'] = examples_list return _dict - def __str__(self): - """Return a `str` version of this DialogNodeOutputOptionsElement object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CreateIntent object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'CreateIntent') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'CreateIntent') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodeOutputOptionsElementValue(object): +class CreateValue: """ - An object defining the message input to be sent to the Watson Assistant service if the - user 
selects the corresponding option. + CreateValue. - :attr MessageInput input: (optional) An input object that includes the input text. + :param str value: The text of the entity value. This string must conform to the + following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param dict metadata: (optional) Any metadata related to the entity value. + :param str type: (optional) Specifies the type of entity value. + :param List[str] synonyms: (optional) An array of synonyms for the entity value. + A value can specify either synonyms or patterns (depending on the value type), + but not both. A synonym must conform to the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param List[str] patterns: (optional) An array of patterns for the entity value. + A value can specify either synonyms or patterns (depending on the value type), + but not both. A pattern is a regular expression; for more information about how + to specify a pattern, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-entities#entities-create-dictionary-based). + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. """ - def __init__(self, input=None): + def __init__( + self, + value: str, + *, + metadata: Optional[dict] = None, + type: Optional[str] = None, + synonyms: Optional[List[str]] = None, + patterns: Optional[List[str]] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: """ - Initialize a DialogNodeOutputOptionsElementValue object. + Initialize a CreateValue object. - :param MessageInput input: (optional) An input object that includes the input - text. + :param str value: The text of the entity value. This string must conform to + the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param dict metadata: (optional) Any metadata related to the entity value. + :param str type: (optional) Specifies the type of entity value. + :param List[str] synonyms: (optional) An array of synonyms for the entity + value. A value can specify either synonyms or patterns (depending on the + value type), but not both. A synonym must conform to the following + restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param List[str] patterns: (optional) An array of patterns for the entity + value. A value can specify either synonyms or patterns (depending on the + value type), but not both. A pattern is a regular expression; for more + information about how to specify a pattern, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-entities#entities-create-dictionary-based).
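+
+        Example (illustrative sketch; the value text and synonyms are made
+        up)::
+
+            value = CreateValue(
+                value='coffee',
+                type=CreateValue.TypeEnum.SYNONYMS.value,
+                synonyms=['espresso', 'cappuccino'],
+            )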
""" - self.input = input + self.value = value + self.metadata = metadata + self.type = type + self.synonyms = synonyms + self.patterns = patterns + self.created = created + self.updated = updated @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeOutputOptionsElementValue object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'CreateValue': + """Initialize a CreateValue object from a json dictionary.""" args = {} - if 'input' in _dict: - args['input'] = MessageInput._from_dict(_dict.get('input')) + if (value := _dict.get('value')) is not None: + args['value'] = value + else: + raise ValueError( + 'Required property \'value\' not present in CreateValue JSON') + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = metadata + if (type := _dict.get('type')) is not None: + args['type'] = type + if (synonyms := _dict.get('synonyms')) is not None: + args['synonyms'] = synonyms + if (patterns := _dict.get('patterns')) is not None: + args['patterns'] = patterns + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a CreateValue object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'input') and self.input is not None: - _dict['input'] = self.input._to_dict() + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value + if hasattr(self, 'metadata') and self.metadata is not None: + _dict['metadata'] = self.metadata + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'synonyms') and self.synonyms is not None: + _dict['synonyms'] = self.synonyms + if hasattr(self, 'patterns') and self.patterns is not None: + _dict['patterns'] = self.patterns + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) return _dict - def __str__(self): - """Return a `str` version of this DialogNodeOutputOptionsElementValue object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this CreateValue object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CreateValue') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'CreateValue') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other - -class DialogNodeOutputTextValuesElement(object): - """ - DialogNodeOutputTextValuesElement. - - :attr str text: (optional) The text of a response. This string can include newline - characters (`\\n`), Markdown tagging, or other special characters, if supported by the - channel. It must be no longer than 4096 characters. 
- """ - - def __init__(self, text=None): + class TypeEnum(str, Enum): """ - Initialize a DialogNodeOutputTextValuesElement object. - - :param str text: (optional) The text of a response. This string can include - newline characters (`\\n`), Markdown tagging, or other special characters, if - supported by the channel. It must be no longer than 4096 characters. + Specifies the type of entity value. """ - self.text = text - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeOutputTextValuesElement object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - return _dict - - def __str__(self): - """Return a `str` version of this DialogNodeOutputTextValuesElement object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other + SYNONYMS = 'synonyms' + PATTERNS = 'patterns' -class DialogNodeVisitedDetails(object): +class DialogNode: """ - DialogNodeVisitedDetails. + DialogNode. - :attr str dialog_node: (optional) A dialog node that was triggered during processing - of the input message. - :attr str title: (optional) The title of the dialog node. - :attr str conditions: (optional) The conditions that trigger the dialog node. + :param str dialog_node: The unique ID of the dialog node. This is an internal + identifier used to refer to the dialog node from other dialog nodes and in the + diagnostic information included with message responses. + This string can contain only Unicode alphanumeric, space, underscore, hyphen, + and dot characters. + :param str description: (optional) The description of the dialog node. This + string cannot contain carriage return, newline, or tab characters. + :param str conditions: (optional) The condition that will trigger the dialog + node. This string cannot contain carriage return, newline, or tab characters. + :param str parent: (optional) The unique ID of the parent dialog node. This + property is omitted if the dialog node has no parent. + :param str previous_sibling: (optional) The unique ID of the previous sibling + dialog node. This property is omitted if the dialog node has no previous + sibling. + :param DialogNodeOutput output: (optional) The output of the dialog node. For + more information about how to specify dialog node output, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-dialog-overview#dialog-overview-responses). + :param DialogNodeContext context: (optional) The context for the dialog node. + :param dict metadata: (optional) The metadata for the dialog node. + :param DialogNodeNextStep next_step: (optional) The next step to execute + following this dialog node. + :param str title: (optional) A human-readable name for the dialog node. If the + node is included in disambiguation, this title is used to populate the **label** + property of the corresponding suggestion in the `suggestion` response type + (unless it is overridden by the **user_label** property). 
The title is also used + to populate the **topic** property in the `connect_to_agent` response type. + This string can contain only Unicode alphanumeric, space, underscore, hyphen, + and dot characters. + :param str type: (optional) How the dialog node is processed. + :param str event_name: (optional) How an `event_handler` node is processed. + :param str variable: (optional) The location in the dialog context where output + is stored. + :param List[DialogNodeAction] actions: (optional) An array of objects describing + any actions to be invoked by the dialog node. + :param str digress_in: (optional) Whether this top-level dialog node can be + digressed into. + :param str digress_out: (optional) Whether this dialog node can be returned to + after a digression. + :param str digress_out_slots: (optional) Whether the user can digress to + top-level nodes while filling out slots. + :param str user_label: (optional) A label that can be displayed externally to + describe the purpose of the node to users. If set, this label is used to + identify the node in disambiguation responses (overriding the value of the + **title** property). + :param bool disambiguation_opt_out: (optional) Whether the dialog node should be + excluded from disambiguation suggestions. Valid only when **type**=`standard` or + `frame`. + :param bool disabled: (optional) For internal use only. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. """ - def __init__(self, dialog_node=None, title=None, conditions=None): + def __init__( + self, + dialog_node: str, + *, + description: Optional[str] = None, + conditions: Optional[str] = None, + parent: Optional[str] = None, + previous_sibling: Optional[str] = None, + output: Optional['DialogNodeOutput'] = None, + context: Optional['DialogNodeContext'] = None, + metadata: Optional[dict] = None, + next_step: Optional['DialogNodeNextStep'] = None, + title: Optional[str] = None, + type: Optional[str] = None, + event_name: Optional[str] = None, + variable: Optional[str] = None, + actions: Optional[List['DialogNodeAction']] = None, + digress_in: Optional[str] = None, + digress_out: Optional[str] = None, + digress_out_slots: Optional[str] = None, + user_label: Optional[str] = None, + disambiguation_opt_out: Optional[bool] = None, + disabled: Optional[bool] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: """ - Initialize a DialogNodeVisitedDetails object. + Initialize a DialogNode object. - :param str dialog_node: (optional) A dialog node that was triggered during - processing of the input message. - :param str title: (optional) The title of the dialog node. - :param str conditions: (optional) The conditions that trigger the dialog node. + :param str dialog_node: The unique ID of the dialog node. This is an + internal identifier used to refer to the dialog node from other dialog + nodes and in the diagnostic information included with message responses. + This string can contain only Unicode alphanumeric, space, underscore, + hyphen, and dot characters. + :param str description: (optional) The description of the dialog node. This + string cannot contain carriage return, newline, or tab characters. + :param str conditions: (optional) The condition that will trigger the + dialog node. This string cannot contain carriage return, newline, or tab + characters. 
+ :param str parent: (optional) The unique ID of the parent dialog node. This + property is omitted if the dialog node has no parent. + :param str previous_sibling: (optional) The unique ID of the previous + sibling dialog node. This property is omitted if the dialog node has no + previous sibling. + :param DialogNodeOutput output: (optional) The output of the dialog node. + For more information about how to specify dialog node output, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-dialog-overview#dialog-overview-responses). + :param DialogNodeContext context: (optional) The context for the dialog + node. + :param dict metadata: (optional) The metadata for the dialog node. + :param DialogNodeNextStep next_step: (optional) The next step to execute + following this dialog node. + :param str title: (optional) A human-readable name for the dialog node. If + the node is included in disambiguation, this title is used to populate the + **label** property of the corresponding suggestion in the `suggestion` + response type (unless it is overridden by the **user_label** property). The + title is also used to populate the **topic** property in the + `connect_to_agent` response type. + This string can contain only Unicode alphanumeric, space, underscore, + hyphen, and dot characters. + :param str type: (optional) How the dialog node is processed. + :param str event_name: (optional) How an `event_handler` node is processed. + :param str variable: (optional) The location in the dialog context where + output is stored. + :param List[DialogNodeAction] actions: (optional) An array of objects + describing any actions to be invoked by the dialog node. + :param str digress_in: (optional) Whether this top-level dialog node can be + digressed into. + :param str digress_out: (optional) Whether this dialog node can be returned + to after a digression. + :param str digress_out_slots: (optional) Whether the user can digress to + top-level nodes while filling out slots. + :param str user_label: (optional) A label that can be displayed externally + to describe the purpose of the node to users. If set, this label is used to + identify the node in disambiguation responses (overriding the value of the + **title** property). + :param bool disambiguation_opt_out: (optional) Whether the dialog node + should be excluded from disambiguation suggestions. Valid only when + **type**=`standard` or `frame`. 
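+
+        Example (illustrative sketch; the node IDs, condition, and title are
+        made up, and `DialogNodeNextStep` is defined later in this module)::
+
+            node = DialogNode(
+                dialog_node='greeting',
+                conditions='#hello',
+                title='Greeting',
+                next_step=DialogNodeNextStep(behavior='jump_to',
+                                             dialog_node='ask_name',
+                                             selector='condition'),
+            )
+            node_json = node.to_dict()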
""" self.dialog_node = dialog_node - self.title = title + self.description = description self.conditions = conditions + self.parent = parent + self.previous_sibling = previous_sibling + self.output = output + self.context = context + self.metadata = metadata + self.next_step = next_step + self.title = title + self.type = type + self.event_name = event_name + self.variable = variable + self.actions = actions + self.digress_in = digress_in + self.digress_out = digress_out + self.digress_out_slots = digress_out_slots + self.user_label = user_label + self.disambiguation_opt_out = disambiguation_opt_out + self.disabled = disabled + self.created = created + self.updated = updated @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeVisitedDetails object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'DialogNode': + """Initialize a DialogNode object from a json dictionary.""" args = {} - if 'dialog_node' in _dict: - args['dialog_node'] = _dict.get('dialog_node') - if 'title' in _dict: - args['title'] = _dict.get('title') - if 'conditions' in _dict: - args['conditions'] = _dict.get('conditions') + if (dialog_node := _dict.get('dialog_node')) is not None: + args['dialog_node'] = dialog_node + else: + raise ValueError( + 'Required property \'dialog_node\' not present in DialogNode JSON' + ) + if (description := _dict.get('description')) is not None: + args['description'] = description + if (conditions := _dict.get('conditions')) is not None: + args['conditions'] = conditions + if (parent := _dict.get('parent')) is not None: + args['parent'] = parent + if (previous_sibling := _dict.get('previous_sibling')) is not None: + args['previous_sibling'] = previous_sibling + if (output := _dict.get('output')) is not None: + args['output'] = DialogNodeOutput.from_dict(output) + if (context := _dict.get('context')) is not None: + args['context'] = DialogNodeContext.from_dict(context) + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = metadata + if (next_step := _dict.get('next_step')) is not None: + args['next_step'] = DialogNodeNextStep.from_dict(next_step) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (type := _dict.get('type')) is not None: + args['type'] = type + if (event_name := _dict.get('event_name')) is not None: + args['event_name'] = event_name + if (variable := _dict.get('variable')) is not None: + args['variable'] = variable + if (actions := _dict.get('actions')) is not None: + args['actions'] = [DialogNodeAction.from_dict(v) for v in actions] + if (digress_in := _dict.get('digress_in')) is not None: + args['digress_in'] = digress_in + if (digress_out := _dict.get('digress_out')) is not None: + args['digress_out'] = digress_out + if (digress_out_slots := _dict.get('digress_out_slots')) is not None: + args['digress_out_slots'] = digress_out_slots + if (user_label := _dict.get('user_label')) is not None: + args['user_label'] = user_label + if (disambiguation_opt_out := + _dict.get('disambiguation_opt_out')) is not None: + args['disambiguation_opt_out'] = disambiguation_opt_out + if (disabled := _dict.get('disabled')) is not None: + args['disabled'] = disabled + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNode object from a json dictionary.""" + return 
cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'dialog_node') and self.dialog_node is not None: _dict['dialog_node'] = self.dialog_node - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description if hasattr(self, 'conditions') and self.conditions is not None: _dict['conditions'] = self.conditions + if hasattr(self, 'parent') and self.parent is not None: + _dict['parent'] = self.parent + if hasattr(self, + 'previous_sibling') and self.previous_sibling is not None: + _dict['previous_sibling'] = self.previous_sibling + if hasattr(self, 'output') and self.output is not None: + if isinstance(self.output, dict): + _dict['output'] = self.output + else: + _dict['output'] = self.output.to_dict() + if hasattr(self, 'context') and self.context is not None: + if isinstance(self.context, dict): + _dict['context'] = self.context + else: + _dict['context'] = self.context.to_dict() + if hasattr(self, 'metadata') and self.metadata is not None: + _dict['metadata'] = self.metadata + if hasattr(self, 'next_step') and self.next_step is not None: + if isinstance(self.next_step, dict): + _dict['next_step'] = self.next_step + else: + _dict['next_step'] = self.next_step.to_dict() + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'event_name') and self.event_name is not None: + _dict['event_name'] = self.event_name + if hasattr(self, 'variable') and self.variable is not None: + _dict['variable'] = self.variable + if hasattr(self, 'actions') and self.actions is not None: + actions_list = [] + for v in self.actions: + if isinstance(v, dict): + actions_list.append(v) + else: + actions_list.append(v.to_dict()) + _dict['actions'] = actions_list + if hasattr(self, 'digress_in') and self.digress_in is not None: + _dict['digress_in'] = self.digress_in + if hasattr(self, 'digress_out') and self.digress_out is not None: + _dict['digress_out'] = self.digress_out + if hasattr(self, + 'digress_out_slots') and self.digress_out_slots is not None: + _dict['digress_out_slots'] = self.digress_out_slots + if hasattr(self, 'user_label') and self.user_label is not None: + _dict['user_label'] = self.user_label + if hasattr(self, 'disambiguation_opt_out' + ) and self.disambiguation_opt_out is not None: + _dict['disambiguation_opt_out'] = self.disambiguation_opt_out + if hasattr(self, 'disabled') and getattr(self, 'disabled') is not None: + _dict['disabled'] = getattr(self, 'disabled') + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) return _dict - def __str__(self): - """Return a `str` version of this DialogNodeVisitedDetails object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNode object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'DialogNode') -> bool: """Return `true` when self and other are equal, false otherwise.""" if 
not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'DialogNode') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class TypeEnum(str, Enum): + """ + How the dialog node is processed. + """ -class DialogRuntimeResponseGeneric(object): - """ - DialogRuntimeResponseGeneric. - - :attr str response_type: The type of response returned by the dialog node. The - specified response type must be supported by the client application or channel. - **Note:** The **suggestion** response type is part of the disambiguation feature, - which is only available for Premium users. - :attr str text: (optional) The text of the response. - :attr int time: (optional) How long to pause, in milliseconds. - :attr bool typing: (optional) Whether to send a "user is typing" event during the - pause. - :attr str source: (optional) The URL of the image. - :attr str title: (optional) The title or introductory text to show before the - response. - :attr str description: (optional) The description to show with the the response. - :attr str preference: (optional) The preferred type of control to display. - :attr list[DialogNodeOutputOptionsElement] options: (optional) An array of objects - describing the options from which the user can choose. - :attr str message_to_human_agent: (optional) A message to be sent to the human agent - who will be taking over the conversation. - :attr str topic: (optional) A label identifying the topic of the conversation, derived - from the **user_label** property of the relevant node. - :attr str dialog_node: (optional) The ID of the dialog node that the **topic** - property is taken from. The **topic** property is populated using the value of the - dialog node's **user_label** property. - :attr list[DialogSuggestion] suggestions: (optional) An array of objects describing - the possible matching dialog nodes from which the user can choose. - **Note:** The **suggestions** property is part of the disambiguation feature, which is - only available for Premium users. - """ - - def __init__(self, - response_type, - text=None, - time=None, - typing=None, - source=None, - title=None, - description=None, - preference=None, - options=None, - message_to_human_agent=None, - topic=None, - dialog_node=None, - suggestions=None): - """ - Initialize a DialogRuntimeResponseGeneric object. - - :param str response_type: The type of response returned by the dialog node. The - specified response type must be supported by the client application or channel. - **Note:** The **suggestion** response type is part of the disambiguation feature, - which is only available for Premium users. - :param str text: (optional) The text of the response. - :param int time: (optional) How long to pause, in milliseconds. - :param bool typing: (optional) Whether to send a "user is typing" event during the - pause. - :param str source: (optional) The URL of the image. - :param str title: (optional) The title or introductory text to show before the - response. - :param str description: (optional) The description to show with the the response. - :param str preference: (optional) The preferred type of control to display. - :param list[DialogNodeOutputOptionsElement] options: (optional) An array of - objects describing the options from which the user can choose. - :param str message_to_human_agent: (optional) A message to be sent to the human - agent who will be taking over the conversation. 
- :param str topic: (optional) A label identifying the topic of the conversation, - derived from the **user_label** property of the relevant node. - :param str dialog_node: (optional) The ID of the dialog node that the **topic** - property is taken from. The **topic** property is populated using the value of the - dialog node's **user_label** property. - :param list[DialogSuggestion] suggestions: (optional) An array of objects - describing the possible matching dialog nodes from which the user can choose. - **Note:** The **suggestions** property is part of the disambiguation feature, - which is only available for Premium users. + STANDARD = 'standard' + EVENT_HANDLER = 'event_handler' + FRAME = 'frame' + SLOT = 'slot' + RESPONSE_CONDITION = 'response_condition' + FOLDER = 'folder' + + class EventNameEnum(str, Enum): + """ + How an `event_handler` node is processed. """ - self.response_type = response_type - self.text = text - self.time = time - self.typing = typing - self.source = source - self.title = title - self.description = description - self.preference = preference - self.options = options - self.message_to_human_agent = message_to_human_agent - self.topic = topic - self.dialog_node = dialog_node - self.suggestions = suggestions - @classmethod + FOCUS = 'focus' + INPUT = 'input' + FILLED = 'filled' + VALIDATE = 'validate' + FILLED_MULTIPLE = 'filled_multiple' + GENERIC = 'generic' + NOMATCH = 'nomatch' + NOMATCH_RESPONSES_DEPLETED = 'nomatch_responses_depleted' + DIGRESSION_RETURN_PROMPT = 'digression_return_prompt' + + class DigressInEnum(str, Enum): + """ + Whether this top-level dialog node can be digressed into. + """ + + NOT_AVAILABLE = 'not_available' + RETURNS = 'returns' + DOES_NOT_RETURN = 'does_not_return' + + class DigressOutEnum(str, Enum): + """ + Whether this dialog node can be returned to after a digression. + """ + + ALLOW_RETURNING = 'allow_returning' + ALLOW_ALL = 'allow_all' + ALLOW_ALL_NEVER_RETURN = 'allow_all_never_return' + + class DigressOutSlotsEnum(str, Enum): + """ + Whether the user can digress to top-level nodes while filling out slots. + """ + + NOT_ALLOWED = 'not_allowed' + ALLOW_RETURNING = 'allow_returning' + ALLOW_ALL = 'allow_all' + + +class DialogNodeAction: + """ + DialogNodeAction. + + :param str name: The name of the action. + :param str type: (optional) The type of action to invoke. + :param dict parameters: (optional) A map of key/value pairs to be provided to + the action. + :param str result_variable: The location in the dialog context where the result + of the action is stored. + :param str credentials: (optional) The name of the context variable that the + client application will use to pass in credentials for the action. + """ + + def __init__( + self, + name: str, + result_variable: str, + *, + type: Optional[str] = None, + parameters: Optional[dict] = None, + credentials: Optional[str] = None, + ) -> None: + """ + Initialize a DialogNodeAction object. + + :param str name: The name of the action. + :param str result_variable: The location in the dialog context where the + result of the action is stored. + :param str type: (optional) The type of action to invoke. + :param dict parameters: (optional) A map of key/value pairs to be provided + to the action. + :param str credentials: (optional) The name of the context variable that + the client application will use to pass in credentials for the action. 
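+
+        Example (illustrative sketch; the action name, result variable, and
+        parameters are made up)::
+
+            action = DialogNodeAction(
+                name='check_order_status',
+                result_variable='order_status',
+                type=DialogNodeAction.TypeEnum.WEBHOOK.value,
+                parameters={'order_id': '$order_id'},
+            )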
+ """ + self.name = name + self.type = type + self.parameters = parameters + self.result_variable = result_variable + self.credentials = credentials + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeAction': + """Initialize a DialogNodeAction object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in DialogNodeAction JSON' + ) + if (type := _dict.get('type')) is not None: + args['type'] = type + if (parameters := _dict.get('parameters')) is not None: + args['parameters'] = parameters + if (result_variable := _dict.get('result_variable')) is not None: + args['result_variable'] = result_variable + else: + raise ValueError( + 'Required property \'result_variable\' not present in DialogNodeAction JSON' + ) + if (credentials := _dict.get('credentials')) is not None: + args['credentials'] = credentials + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeAction object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'parameters') and self.parameters is not None: + _dict['parameters'] = self.parameters + if hasattr(self, + 'result_variable') and self.result_variable is not None: + _dict['result_variable'] = self.result_variable + if hasattr(self, 'credentials') and self.credentials is not None: + _dict['credentials'] = self.credentials + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeAction object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeAction') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeAction') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of action to invoke. + """ + + CLIENT = 'client' + SERVER = 'server' + CLOUD_FUNCTION = 'cloud_function' + WEB_ACTION = 'web_action' + WEBHOOK = 'webhook' + + +class DialogNodeCollection: + """ + An array of dialog nodes. + + :param List[DialogNode] dialog_nodes: An array of objects describing the dialog + nodes defined for the workspace. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). + """ + + def __init__( + self, + dialog_nodes: List['DialogNode'], + pagination: 'Pagination', + ) -> None: + """ + Initialize a DialogNodeCollection object. + + :param List[DialogNode] dialog_nodes: An array of objects describing the + dialog nodes defined for the workspace. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). 
+ """ + self.dialog_nodes = dialog_nodes + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeCollection': + """Initialize a DialogNodeCollection object from a json dictionary.""" + args = {} + if (dialog_nodes := _dict.get('dialog_nodes')) is not None: + args['dialog_nodes'] = [ + DialogNode.from_dict(v) for v in dialog_nodes + ] + else: + raise ValueError( + 'Required property \'dialog_nodes\' not present in DialogNodeCollection JSON' + ) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in DialogNodeCollection JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'dialog_nodes') and self.dialog_nodes is not None: + dialog_nodes_list = [] + for v in self.dialog_nodes: + if isinstance(v, dict): + dialog_nodes_list.append(v) + else: + dialog_nodes_list.append(v.to_dict()) + _dict['dialog_nodes'] = dialog_nodes_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogNodeContext: + """ + The context for the dialog node. + + :param dict integrations: (optional) Context data intended for specific + integrations. + + This type supports additional properties of type object. Any context variable. + """ + + # The set of defined properties for the class + _properties = frozenset(['integrations']) + + def __init__( + self, + *, + integrations: Optional[dict] = None, + **kwargs: Optional[object], + ) -> None: + """ + Initialize a DialogNodeContext object. + + :param dict integrations: (optional) Context data intended for specific + integrations. + :param object **kwargs: (optional) Any context variable. + """ + self.integrations = integrations + for k, v in kwargs.items(): + if k not in DialogNodeContext._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. 
+ format(k)) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeContext': + """Initialize a DialogNodeContext object from a json dictionary.""" + args = {} + if (integrations := _dict.get('integrations')) is not None: + args['integrations'] = integrations + for k, v in _dict.items(): + if k not in cls._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + args[k] = v + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeContext object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'integrations') and self.integrations is not None: + _dict['integrations'] = self.integrations + for k in [ + _k for _k in vars(self).keys() + if _k not in DialogNodeContext._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def get_properties(self) -> Dict: + """Return the additional properties from this instance of DialogNodeContext in the form of a dict.""" + _dict = {} + for k in [ + _k for _k in vars(self).keys() + if _k not in DialogNodeContext._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of additional properties in this instance of DialogNodeContext""" + for k in [ + _k for _k in vars(self).keys() + if _k not in DialogNodeContext._properties + ]: + delattr(self, k) + for k, v in _dict.items(): + if k not in DialogNodeContext._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. + format(k)) + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeContext object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeContext') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeContext') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogNodeNextStep: + """ + The next step to execute following this dialog node. + + :param str behavior: What happens after the dialog node completes. The valid + values depend on the node type: + - The following values are valid for any node: + - `get_user_input` + - `skip_user_input` + - `jump_to` + - If the node is of type `event_handler` and its parent node is of type `slot` + or `frame`, additional values are also valid: + - if **event_name**=`filled` and the type of the parent node is `slot`: + - `reprompt` + - `skip_all_slots` + - if **event_name**=`nomatch` and the type of the parent node is `slot`: + - `reprompt` + - `skip_slot` + - `skip_all_slots` + - if **event_name**=`generic` and the type of the parent node is `frame`: + - `reprompt` + - `skip_slot` + - `skip_all_slots` + If you specify `jump_to`, then you must also specify a value for the + `dialog_node` property. + :param str dialog_node: (optional) The unique ID of the dialog node to process + next. This parameter is required if **behavior**=`jump_to`. 
+ :param str selector: (optional) Which part of the dialog node to process next. + """ + + def __init__( + self, + behavior: str, + *, + dialog_node: Optional[str] = None, + selector: Optional[str] = None, + ) -> None: + """ + Initialize a DialogNodeNextStep object. + + :param str behavior: What happens after the dialog node completes. The + valid values depend on the node type: + - The following values are valid for any node: + - `get_user_input` + - `skip_user_input` + - `jump_to` + - If the node is of type `event_handler` and its parent node is of type + `slot` or `frame`, additional values are also valid: + - if **event_name**=`filled` and the type of the parent node is `slot`: + - `reprompt` + - `skip_all_slots` + - if **event_name**=`nomatch` and the type of the parent node is `slot`: + - `reprompt` + - `skip_slot` + - `skip_all_slots` + - if **event_name**=`generic` and the type of the parent node is `frame`: + - `reprompt` + - `skip_slot` + - `skip_all_slots` + If you specify `jump_to`, then you must also specify a value for the + `dialog_node` property. + :param str dialog_node: (optional) The unique ID of the dialog node to + process next. This parameter is required if **behavior**=`jump_to`. + :param str selector: (optional) Which part of the dialog node to process + next. + """ + self.behavior = behavior + self.dialog_node = dialog_node + self.selector = selector + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeNextStep': + """Initialize a DialogNodeNextStep object from a json dictionary.""" + args = {} + if (behavior := _dict.get('behavior')) is not None: + args['behavior'] = behavior + else: + raise ValueError( + 'Required property \'behavior\' not present in DialogNodeNextStep JSON' + ) + if (dialog_node := _dict.get('dialog_node')) is not None: + args['dialog_node'] = dialog_node + if (selector := _dict.get('selector')) is not None: + args['selector'] = selector + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeNextStep object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'behavior') and self.behavior is not None: + _dict['behavior'] = self.behavior + if hasattr(self, 'dialog_node') and self.dialog_node is not None: + _dict['dialog_node'] = self.dialog_node + if hasattr(self, 'selector') and self.selector is not None: + _dict['selector'] = self.selector + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeNextStep object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeNextStep') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeNextStep') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class BehaviorEnum(str, Enum): + """ + What happens after the dialog node completes. 
The valid values depend on the node + type: + - The following values are valid for any node: + - `get_user_input` + - `skip_user_input` + - `jump_to` + - If the node is of type `event_handler` and its parent node is of type `slot` or + `frame`, additional values are also valid: + - if **event_name**=`filled` and the type of the parent node is `slot`: + - `reprompt` + - `skip_all_slots` + - if **event_name**=`nomatch` and the type of the parent node is `slot`: + - `reprompt` + - `skip_slot` + - `skip_all_slots` + - if **event_name**=`generic` and the type of the parent node is `frame`: + - `reprompt` + - `skip_slot` + - `skip_all_slots` + If you specify `jump_to`, then you must also specify a value for the + `dialog_node` property. + """ + + GET_USER_INPUT = 'get_user_input' + SKIP_USER_INPUT = 'skip_user_input' + JUMP_TO = 'jump_to' + REPROMPT = 'reprompt' + SKIP_SLOT = 'skip_slot' + SKIP_ALL_SLOTS = 'skip_all_slots' + + class SelectorEnum(str, Enum): + """ + Which part of the dialog node to process next. + """ + + CONDITION = 'condition' + CLIENT = 'client' + USER_INPUT = 'user_input' + BODY = 'body' + + +class DialogNodeOutput: + """ + The output of the dialog node. For more information about how to specify dialog node + output, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-dialog-overview#dialog-overview-responses). + + :param List[DialogNodeOutputGeneric] generic: (optional) An array of objects + describing the output defined for the dialog node. + :param dict integrations: (optional) Output intended for specific integrations. + For more information, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-dialog-responses-json). + :param DialogNodeOutputModifiers modifiers: (optional) Options that modify how + specified output is handled. + + This type supports additional properties of type object. Any additional data included + in the dialog node output. + """ + + # The set of defined properties for the class + _properties = frozenset(['generic', 'integrations', 'modifiers']) + + def __init__( + self, + *, + generic: Optional[List['DialogNodeOutputGeneric']] = None, + integrations: Optional[dict] = None, + modifiers: Optional['DialogNodeOutputModifiers'] = None, + **kwargs: Optional[object], + ) -> None: + """ + Initialize a DialogNodeOutput object. + + :param List[DialogNodeOutputGeneric] generic: (optional) An array of + objects describing the output defined for the dialog node. + :param dict integrations: (optional) Output intended for specific + integrations. For more information, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-dialog-responses-json). + :param DialogNodeOutputModifiers modifiers: (optional) Options that modify + how specified output is handled. + :param object **kwargs: (optional) Any additional data included in the + dialog node output. + """ + self.generic = generic + self.integrations = integrations + self.modifiers = modifiers + for k, v in kwargs.items(): + if k not in DialogNodeOutput._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. 
+ format(k)) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeOutput': + """Initialize a DialogNodeOutput object from a json dictionary.""" + args = {} + if (generic := _dict.get('generic')) is not None: + args['generic'] = [ + DialogNodeOutputGeneric.from_dict(v) for v in generic + ] + if (integrations := _dict.get('integrations')) is not None: + args['integrations'] = integrations + if (modifiers := _dict.get('modifiers')) is not None: + args['modifiers'] = DialogNodeOutputModifiers.from_dict(modifiers) + for k, v in _dict.items(): + if k not in cls._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + args[k] = v + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutput object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'generic') and self.generic is not None: + generic_list = [] + for v in self.generic: + if isinstance(v, dict): + generic_list.append(v) + else: + generic_list.append(v.to_dict()) + _dict['generic'] = generic_list + if hasattr(self, 'integrations') and self.integrations is not None: + _dict['integrations'] = self.integrations + if hasattr(self, 'modifiers') and self.modifiers is not None: + if isinstance(self.modifiers, dict): + _dict['modifiers'] = self.modifiers + else: + _dict['modifiers'] = self.modifiers.to_dict() + for k in [ + _k for _k in vars(self).keys() + if _k not in DialogNodeOutput._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def get_properties(self) -> Dict: + """Return the additional properties from this instance of DialogNodeOutput in the form of a dict.""" + _dict = {} + for k in [ + _k for _k in vars(self).keys() + if _k not in DialogNodeOutput._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of additional properties in this instance of DialogNodeOutput""" + for k in [ + _k for _k in vars(self).keys() + if _k not in DialogNodeOutput._properties + ]: + delattr(self, k) + for k, v in _dict.items(): + if k not in DialogNodeOutput._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. + format(k)) + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutput object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeOutput') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeOutput') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogNodeOutputConnectToAgentTransferInfo: + """ + Routing or other contextual information to be used by target service desk systems. + + :param dict target: (optional) + """ + + def __init__( + self, + *, + target: Optional[dict] = None, + ) -> None: + """ + Initialize a DialogNodeOutputConnectToAgentTransferInfo object. 
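# Illustrative usage sketch (not part of the generated code above): shows how the
# DialogNodeOutput model accepts free-form additional properties alongside its
# typed fields. The module path and all values below are assumptions.
from ibm_watson.assistant_v1 import DialogNodeOutput

output = DialogNodeOutput(
    integrations={'slack': {'attachments': []}},  # hypothetical integration payload
    custom_flag=True,                             # stored as an additional property
)
print(output.get_properties())   # {'custom_flag': True}
print(output.to_dict())          # contains both 'integrations' and 'custom_flag'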
+ + :param dict target: (optional) + """ + self.target = target + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'DialogNodeOutputConnectToAgentTransferInfo': + """Initialize a DialogNodeOutputConnectToAgentTransferInfo object from a json dictionary.""" + args = {} + if (target := _dict.get('target')) is not None: + args['target'] = target + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputConnectToAgentTransferInfo object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'target') and self.target is not None: + _dict['target'] = self.target + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputConnectToAgentTransferInfo object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'DialogNodeOutputConnectToAgentTransferInfo') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'DialogNodeOutputConnectToAgentTransferInfo') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogNodeOutputGeneric: + """ + DialogNodeOutputGeneric. + + """ + + def __init__(self,) -> None: + """ + Initialize a DialogNodeOutputGeneric object. + + """ + msg = "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format( + ", ".join([ + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeText', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypePause', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeImage', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeOption', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe' + ])) + raise Exception(msg) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeOutputGeneric': + """Initialize a DialogNodeOutputGeneric object from a json dictionary.""" + disc_class = cls._get_class_by_discriminator(_dict) + if disc_class != cls: + return disc_class.from_dict(_dict) + msg = "Cannot convert dictionary into an instance of base class 'DialogNodeOutputGeneric'. 
The discriminator value should map to a valid subclass: {1}".format( + ", ".join([ + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeText', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypePause', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeImage', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeOption', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio', + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe' + ])) + raise Exception(msg) + + @classmethod + def _from_dict(cls, _dict: Dict): + """Initialize a DialogNodeOutputGeneric object from a json dictionary.""" + return cls.from_dict(_dict) + + @classmethod + def _get_class_by_discriminator(cls, _dict: Dict) -> object: + mapping = {} + mapping[ + 'audio'] = 'DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio' + mapping[ + 'channel_transfer'] = 'DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer' + mapping[ + 'connect_to_agent'] = 'DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent' + mapping[ + 'iframe'] = 'DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe' + mapping[ + 'image'] = 'DialogNodeOutputGenericDialogNodeOutputResponseTypeImage' + mapping[ + 'option'] = 'DialogNodeOutputGenericDialogNodeOutputResponseTypeOption' + mapping[ + 'pause'] = 'DialogNodeOutputGenericDialogNodeOutputResponseTypePause' + mapping[ + 'search_skill'] = 'DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill' + mapping[ + 'text'] = 'DialogNodeOutputGenericDialogNodeOutputResponseTypeText' + mapping[ + 'user_defined'] = 'DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined' + mapping[ + 'video'] = 'DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo' + disc_value = _dict.get('response_type') + if disc_value is None: + raise ValueError( + 'Discriminator property \'response_type\' not found in DialogNodeOutputGeneric JSON' + ) + class_name = mapping.get(disc_value, disc_value) + try: + disc_class = getattr(sys.modules[__name__], class_name) + except AttributeError: + disc_class = cls + if isinstance(disc_class, object): + return disc_class + raise TypeError('%s is not a discriminator class' % class_name) + + +class DialogNodeOutputModifiers: + """ + Options that modify how specified output is handled. + + :param bool overwrite: (optional) Whether values in the output will overwrite + output values in an array specified by previously executed dialog nodes. If this + option is set to `false`, new values will be appended to previously specified + values. + """ + + def __init__( + self, + *, + overwrite: Optional[bool] = None, + ) -> None: + """ + Initialize a DialogNodeOutputModifiers object. + + :param bool overwrite: (optional) Whether values in the output will + overwrite output values in an array specified by previously executed dialog + nodes. If this option is set to `false`, new values will be appended to + previously specified values. 
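# Illustrative sketch (not part of the generated code above):
# DialogNodeOutputGeneric.from_dict dispatches on the 'response_type'
# discriminator to one of the subclasses listed in _get_class_by_discriminator.
# The payload is hypothetical and assumes the text subclass accepts a 'values'
# list, as defined elsewhere in this module.
from ibm_watson.assistant_v1 import DialogNodeOutputGeneric

generic = DialogNodeOutputGeneric.from_dict({
    'response_type': 'text',
    'values': [{'text': 'Hello! How can I help you?'}],
})
print(type(generic).__name__)
# DialogNodeOutputGenericDialogNodeOutputResponseTypeText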
+ """ + self.overwrite = overwrite + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeOutputModifiers': + """Initialize a DialogNodeOutputModifiers object from a json dictionary.""" + args = {} + if (overwrite := _dict.get('overwrite')) is not None: + args['overwrite'] = overwrite + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputModifiers object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'overwrite') and self.overwrite is not None: + _dict['overwrite'] = self.overwrite + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputModifiers object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeOutputModifiers') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeOutputModifiers') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogNodeOutputOptionsElement: + """ + DialogNodeOutputOptionsElement. + + :param str label: The user-facing label for the option. + :param DialogNodeOutputOptionsElementValue value: An object defining the message + input to be sent to the Watson Assistant service if the user selects the + corresponding option. + """ + + def __init__( + self, + label: str, + value: 'DialogNodeOutputOptionsElementValue', + ) -> None: + """ + Initialize a DialogNodeOutputOptionsElement object. + + :param str label: The user-facing label for the option. + :param DialogNodeOutputOptionsElementValue value: An object defining the + message input to be sent to the Watson Assistant service if the user + selects the corresponding option. 
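# Illustrative sketch (not part of the generated code above): a minimal round
# trip through DialogNodeOutputModifiers. Serialization is gated on
# "is not None", so a False value is preserved. The payload is hypothetical.
from ibm_watson.assistant_v1 import DialogNodeOutputModifiers

modifiers = DialogNodeOutputModifiers.from_dict({'overwrite': False})
assert modifiers.overwrite is False
assert modifiers.to_dict() == {'overwrite': False}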
+ """ + self.label = label + self.value = value + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeOutputOptionsElement': + """Initialize a DialogNodeOutputOptionsElement object from a json dictionary.""" + args = {} + if (label := _dict.get('label')) is not None: + args['label'] = label + else: + raise ValueError( + 'Required property \'label\' not present in DialogNodeOutputOptionsElement JSON' + ) + if (value := _dict.get('value')) is not None: + args['value'] = DialogNodeOutputOptionsElementValue.from_dict(value) + else: + raise ValueError( + 'Required property \'value\' not present in DialogNodeOutputOptionsElement JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputOptionsElement object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'label') and self.label is not None: + _dict['label'] = self.label + if hasattr(self, 'value') and self.value is not None: + if isinstance(self.value, dict): + _dict['value'] = self.value + else: + _dict['value'] = self.value.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputOptionsElement object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeOutputOptionsElement') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeOutputOptionsElement') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogNodeOutputOptionsElementValue: + """ + An object defining the message input to be sent to the Watson Assistant service if the + user selects the corresponding option. + + :param MessageInput input: (optional) An input object that includes the input + text. + :param List[RuntimeIntent] intents: (optional) An array of intents to be used + while processing the input. + **Note:** This property is supported for backward compatibility with + applications that use the v1 **Get response to user input** method. + :param List[RuntimeEntity] entities: (optional) An array of entities to be used + while processing the user input. + **Note:** This property is supported for backward compatibility with + applications that use the v1 **Get response to user input** method. + """ + + def __init__( + self, + *, + input: Optional['MessageInput'] = None, + intents: Optional[List['RuntimeIntent']] = None, + entities: Optional[List['RuntimeEntity']] = None, + ) -> None: + """ + Initialize a DialogNodeOutputOptionsElementValue object. + + :param MessageInput input: (optional) An input object that includes the + input text. + :param List[RuntimeIntent] intents: (optional) An array of intents to be + used while processing the input. + **Note:** This property is supported for backward compatibility with + applications that use the v1 **Get response to user input** method. + :param List[RuntimeEntity] entities: (optional) An array of entities to be + used while processing the user input. + **Note:** This property is supported for backward compatibility with + applications that use the v1 **Get response to user input** method. 
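# Illustrative sketch (not part of the generated code above): builds an option
# element from a hypothetical JSON payload. 'label' and 'value' are required,
# and 'value' is parsed into a DialogNodeOutputOptionsElementValue, which in
# turn parses a MessageInput defined elsewhere in this module.
from ibm_watson.assistant_v1 import DialogNodeOutputOptionsElement

option = DialogNodeOutputOptionsElement.from_dict({
    'label': 'Reset password',
    'value': {'input': {'text': 'I want to reset my password'}},
})
print(option.to_dict()['label'])   # Reset password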
+ """ + self.input = input + self.intents = intents + self.entities = entities + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeOutputOptionsElementValue': + """Initialize a DialogNodeOutputOptionsElementValue object from a json dictionary.""" + args = {} + if (input := _dict.get('input')) is not None: + args['input'] = MessageInput.from_dict(input) + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputOptionsElementValue object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'input') and self.input is not None: + if isinstance(self.input, dict): + _dict['input'] = self.input + else: + _dict['input'] = self.input.to_dict() + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputOptionsElementValue object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeOutputOptionsElementValue') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeOutputOptionsElementValue') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogNodeOutputTextValuesElement: + """ + DialogNodeOutputTextValuesElement. + + :param str text: (optional) The text of a response. This string can include + newline characters (`\n`), Markdown tagging, or other special characters, if + supported by the channel. + """ + + def __init__( + self, + *, + text: Optional[str] = None, + ) -> None: + """ + Initialize a DialogNodeOutputTextValuesElement object. + + :param str text: (optional) The text of a response. This string can include + newline characters (`\n`), Markdown tagging, or other special characters, + if supported by the channel. 
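# Illustrative sketch (not part of the generated code above):
# DialogNodeOutputTextValuesElement wraps a single response string, which may
# contain newlines or Markdown if the channel supports them. The text is made up.
from ibm_watson.assistant_v1 import DialogNodeOutputTextValuesElement

element = DialogNodeOutputTextValuesElement(text='Hello.\nHow can I help you today?')
assert element.to_dict() == {'text': 'Hello.\nHow can I help you today?'}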
+ """ + self.text = text + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeOutputTextValuesElement': + """Initialize a DialogNodeOutputTextValuesElement object from a json dictionary.""" + args = {} + if (text := _dict.get('text')) is not None: + args['text'] = text + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputTextValuesElement object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputTextValuesElement object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeOutputTextValuesElement') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeOutputTextValuesElement') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogNodeVisitedDetails: + """ + DialogNodeVisitedDetails. + + :param str dialog_node: (optional) The unique ID of a dialog node that was + triggered during processing of the input message. + :param str title: (optional) The title of the dialog node. + :param str conditions: (optional) The conditions that trigger the dialog node. + """ + + def __init__( + self, + *, + dialog_node: Optional[str] = None, + title: Optional[str] = None, + conditions: Optional[str] = None, + ) -> None: + """ + Initialize a DialogNodeVisitedDetails object. + + :param str dialog_node: (optional) The unique ID of a dialog node that was + triggered during processing of the input message. + :param str title: (optional) The title of the dialog node. + :param str conditions: (optional) The conditions that trigger the dialog + node. 
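# Illustrative sketch (not part of the generated code above): parses one
# hypothetical entry from the nodes-visited details returned with a message
# response; all three fields are optional.
from ibm_watson.assistant_v1 import DialogNodeVisitedDetails

detail = DialogNodeVisitedDetails.from_dict({
    'dialog_node': 'node_1_1510000000000',
    'title': 'Greeting',
    'conditions': 'welcome',
})
print(detail.title)   # Greeting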
+ """ + self.dialog_node = dialog_node + self.title = title + self.conditions = conditions + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeVisitedDetails': + """Initialize a DialogNodeVisitedDetails object from a json dictionary.""" + args = {} + if (dialog_node := _dict.get('dialog_node')) is not None: + args['dialog_node'] = dialog_node + if (title := _dict.get('title')) is not None: + args['title'] = title + if (conditions := _dict.get('conditions')) is not None: + args['conditions'] = conditions + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeVisitedDetails object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'dialog_node') and self.dialog_node is not None: + _dict['dialog_node'] = self.dialog_node + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'conditions') and self.conditions is not None: + _dict['conditions'] = self.conditions + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeVisitedDetails object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeVisitedDetails') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeVisitedDetails') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogSuggestion: + """ + DialogSuggestion. + + :param str label: The user-facing label for the disambiguation option. This + label is taken from the **title** or **user_label** property of the + corresponding dialog node. + :param DialogSuggestionValue value: An object defining the message input, + intents, and entities to be sent to the Watson Assistant service if the user + selects the corresponding disambiguation option. + **Note:** These properties must be included in the request body of the next + message sent to the assistant. Do not modify or remove any of the included + properties. + :param dict output: (optional) The dialog output that will be returned from the + Watson Assistant service if the user selects the corresponding option. + :param str dialog_node: (optional) The unique ID of the dialog node that the + **label** property is taken from. The **label** property is populated using the + value of the dialog node's **title** or **user_label** property. + """ + + def __init__( + self, + label: str, + value: 'DialogSuggestionValue', + *, + output: Optional[dict] = None, + dialog_node: Optional[str] = None, + ) -> None: + """ + Initialize a DialogSuggestion object. + + :param str label: The user-facing label for the disambiguation option. This + label is taken from the **title** or **user_label** property of the + corresponding dialog node. + :param DialogSuggestionValue value: An object defining the message input, + intents, and entities to be sent to the Watson Assistant service if the + user selects the corresponding disambiguation option. + **Note:** These properties must be included in the request body of the + next message sent to the assistant. Do not modify or remove any of the + included properties. 
+ :param dict output: (optional) The dialog output that will be returned from + the Watson Assistant service if the user selects the corresponding option. + :param str dialog_node: (optional) The unique ID of the dialog node that + the **label** property is taken from. The **label** property is populated + using the value of the dialog node's **title** or **user_label** property. + """ + self.label = label + self.value = value + self.output = output + self.dialog_node = dialog_node + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogSuggestion': + """Initialize a DialogSuggestion object from a json dictionary.""" + args = {} + if (label := _dict.get('label')) is not None: + args['label'] = label + else: + raise ValueError( + 'Required property \'label\' not present in DialogSuggestion JSON' + ) + if (value := _dict.get('value')) is not None: + args['value'] = DialogSuggestionValue.from_dict(value) + else: + raise ValueError( + 'Required property \'value\' not present in DialogSuggestion JSON' + ) + if (output := _dict.get('output')) is not None: + args['output'] = output + if (dialog_node := _dict.get('dialog_node')) is not None: + args['dialog_node'] = dialog_node + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogSuggestion object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'label') and self.label is not None: + _dict['label'] = self.label + if hasattr(self, 'value') and self.value is not None: + if isinstance(self.value, dict): + _dict['value'] = self.value + else: + _dict['value'] = self.value.to_dict() + if hasattr(self, 'output') and self.output is not None: + _dict['output'] = self.output + if hasattr(self, 'dialog_node') and self.dialog_node is not None: + _dict['dialog_node'] = self.dialog_node + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogSuggestion object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogSuggestion') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogSuggestion') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogSuggestionValue: + """ + An object defining the message input, intents, and entities to be sent to the Watson + Assistant service if the user selects the corresponding disambiguation option. + **Note:** These properties must be included in the request body of the next message + sent to the assistant. Do not modify or remove any of the included properties. + + :param MessageInput input: (optional) An input object that includes the input + text. + :param List[RuntimeIntent] intents: (optional) An array of intents to be sent + along with the user input. + :param List[RuntimeEntity] entities: (optional) An array of entities to be sent + along with the user input. + """ + + def __init__( + self, + *, + input: Optional['MessageInput'] = None, + intents: Optional[List['RuntimeIntent']] = None, + entities: Optional[List['RuntimeEntity']] = None, + ) -> None: + """ + Initialize a DialogSuggestionValue object. 
+ + :param MessageInput input: (optional) An input object that includes the + input text. + :param List[RuntimeIntent] intents: (optional) An array of intents to be + sent along with the user input. + :param List[RuntimeEntity] entities: (optional) An array of entities to be + sent along with the user input. + """ + self.input = input + self.intents = intents + self.entities = entities + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogSuggestionValue': + """Initialize a DialogSuggestionValue object from a json dictionary.""" + args = {} + if (input := _dict.get('input')) is not None: + args['input'] = MessageInput.from_dict(input) + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogSuggestionValue object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'input') and self.input is not None: + if isinstance(self.input, dict): + _dict['input'] = self.input + else: + _dict['input'] = self.input.to_dict() + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogSuggestionValue object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogSuggestionValue') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogSuggestionValue') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Entity: + """ + Entity. + + :param str entity: The name of the entity. This string must conform to the + following restrictions: + - It can contain only Unicode alphanumeric, underscore, and hyphen characters. + - If you specify an entity name beginning with the reserved prefix `sys-`, it + must be the name of a system entity that you want to enable. (Any entity content + specified with the request is ignored.). + :param str description: (optional) The description of the entity. This string + cannot contain carriage return, newline, or tab characters. + :param dict metadata: (optional) Any metadata related to the entity. + :param bool fuzzy_match: (optional) Whether to use fuzzy matching for the + entity. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + :param List[Value] values: (optional) An array of objects describing the entity + values. 
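# Illustrative sketch (not part of the generated code above): a disambiguation
# suggestion as it might arrive in a message response. The payload is
# hypothetical and assumes MessageInput (defined elsewhere in this module)
# exposes a 'text' attribute.
from ibm_watson.assistant_v1 import DialogSuggestion

suggestion = DialogSuggestion.from_dict({
    'label': 'Opening hours',
    'value': {'input': {'text': 'What are your opening hours?'}},
    'dialog_node': 'node_2_1510000000000',
})
print(suggestion.value.input.text)   # What are your opening hours?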
+ """ + + def __init__( + self, + entity: str, + *, + description: Optional[str] = None, + metadata: Optional[dict] = None, + fuzzy_match: Optional[bool] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + values: Optional[List['Value']] = None, + ) -> None: + """ + Initialize a Entity object. + + :param str entity: The name of the entity. This string must conform to the + following restrictions: + - It can contain only Unicode alphanumeric, underscore, and hyphen + characters. + - If you specify an entity name beginning with the reserved prefix `sys-`, + it must be the name of a system entity that you want to enable. (Any entity + content specified with the request is ignored.). + :param str description: (optional) The description of the entity. This + string cannot contain carriage return, newline, or tab characters. + :param dict metadata: (optional) Any metadata related to the entity. + :param bool fuzzy_match: (optional) Whether to use fuzzy matching for the + entity. + :param List[Value] values: (optional) An array of objects describing the + entity values. + """ + self.entity = entity + self.description = description + self.metadata = metadata + self.fuzzy_match = fuzzy_match + self.created = created + self.updated = updated + self.values = values + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Entity': + """Initialize a Entity object from a json dictionary.""" + args = {} + if (entity := _dict.get('entity')) is not None: + args['entity'] = entity + else: + raise ValueError( + 'Required property \'entity\' not present in Entity JSON') + if (description := _dict.get('description')) is not None: + args['description'] = description + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = metadata + if (fuzzy_match := _dict.get('fuzzy_match')) is not None: + args['fuzzy_match'] = fuzzy_match + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + if (values := _dict.get('values')) is not None: + args['values'] = [Value.from_dict(v) for v in values] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Entity object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'entity') and self.entity is not None: + _dict['entity'] = self.entity + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'metadata') and self.metadata is not None: + _dict['metadata'] = self.metadata + if hasattr(self, 'fuzzy_match') and self.fuzzy_match is not None: + _dict['fuzzy_match'] = self.fuzzy_match + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + if hasattr(self, 'values') and self.values is not None: + values_list = [] + for v in self.values: + if isinstance(v, dict): + values_list.append(v) + else: + values_list.append(v.to_dict()) + _dict['values'] = values_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Entity 
object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Entity') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Entity') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class EntityCollection: + """ + An array of objects describing the entities for the workspace. + + :param List[Entity] entities: An array of objects describing the entities + defined for the workspace. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). + """ + + def __init__( + self, + entities: List['Entity'], + pagination: 'Pagination', + ) -> None: + """ + Initialize a EntityCollection object. + + :param List[Entity] entities: An array of objects describing the entities + defined for the workspace. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). + """ + self.entities = entities + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'EntityCollection': + """Initialize a EntityCollection object from a json dictionary.""" + args = {} + if (entities := _dict.get('entities')) is not None: + args['entities'] = [Entity.from_dict(v) for v in entities] + else: + raise ValueError( + 'Required property \'entities\' not present in EntityCollection JSON' + ) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in EntityCollection JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a EntityCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this EntityCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'EntityCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'EntityCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class EntityMention: + """ + An object describing a contextual entity mention. + + :param str text: The text of the user input example. + :param str intent: The name of the intent. + :param List[int] location: An array of zero-based character offsets that + indicate where the entity mentions begin and end in the input text. 
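# Illustrative sketch (not part of the generated code above): Entity.from_dict
# converts the 'created'/'updated' timestamps into datetime objects via
# string_to_datetime, and to_dict serializes them back to strings. The payload
# is hypothetical.
from ibm_watson.assistant_v1 import Entity

entity = Entity.from_dict({
    'entity': 'appliance',
    'fuzzy_match': True,
    'created': '2023-01-01T12:00:00.000Z',
})
print(type(entity.created).__name__)   # datetime
print(entity.to_dict()['created'])     # ISO 8601 string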
+ """ + + def __init__( + self, + text: str, + intent: str, + location: List[int], + ) -> None: + """ + Initialize a EntityMention object. + + :param str text: The text of the user input example. + :param str intent: The name of the intent. + :param List[int] location: An array of zero-based character offsets that + indicate where the entity mentions begin and end in the input text. + """ + self.text = text + self.intent = intent + self.location = location + + @classmethod + def from_dict(cls, _dict: Dict) -> 'EntityMention': + """Initialize a EntityMention object from a json dictionary.""" + args = {} + if (text := _dict.get('text')) is not None: + args['text'] = text + else: + raise ValueError( + 'Required property \'text\' not present in EntityMention JSON') + if (intent := _dict.get('intent')) is not None: + args['intent'] = intent + else: + raise ValueError( + 'Required property \'intent\' not present in EntityMention JSON' + ) + if (location := _dict.get('location')) is not None: + args['location'] = location + else: + raise ValueError( + 'Required property \'location\' not present in EntityMention JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a EntityMention object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'intent') and self.intent is not None: + _dict['intent'] = self.intent + if hasattr(self, 'location') and self.location is not None: + _dict['location'] = self.location + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this EntityMention object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'EntityMention') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'EntityMention') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class EntityMentionCollection: + """ + EntityMentionCollection. + + :param List[EntityMention] examples: An array of objects describing the entity + mentions defined for an entity. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). + """ + + def __init__( + self, + examples: List['EntityMention'], + pagination: 'Pagination', + ) -> None: + """ + Initialize a EntityMentionCollection object. + + :param List[EntityMention] examples: An array of objects describing the + entity mentions defined for an entity. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). 
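# Illustrative sketch (not part of the generated code above): the 'location'
# offsets are zero-based character positions marking where the mention begins
# and ends in the example text. The values below are made up.
from ibm_watson.assistant_v1 import EntityMention

mention = EntityMention.from_dict({
    'text': 'book a flight to Paris',
    'intent': 'book_flight',
    'location': [17, 22],
})
print(mention.text[mention.location[0]:mention.location[1]])   # Paris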
+ """ + self.examples = examples + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'EntityMentionCollection': + """Initialize a EntityMentionCollection object from a json dictionary.""" + args = {} + if (examples := _dict.get('examples')) is not None: + args['examples'] = [EntityMention.from_dict(v) for v in examples] + else: + raise ValueError( + 'Required property \'examples\' not present in EntityMentionCollection JSON' + ) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in EntityMentionCollection JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a EntityMentionCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'examples') and self.examples is not None: + examples_list = [] + for v in self.examples: + if isinstance(v, dict): + examples_list.append(v) + else: + examples_list.append(v.to_dict()) + _dict['examples'] = examples_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this EntityMentionCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'EntityMentionCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'EntityMentionCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Example: + """ + Example. + + :param str text: The text of a user input example. This string must conform to + the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param List[Mention] mentions: (optional) An array of contextual entity + mentions. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + """ + + def __init__( + self, + text: str, + *, + mentions: Optional[List['Mention']] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: + """ + Initialize a Example object. + + :param str text: The text of a user input example. This string must conform + to the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param List[Mention] mentions: (optional) An array of contextual entity + mentions. 
+ """ + self.text = text + self.mentions = mentions + self.created = created + self.updated = updated + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Example': + """Initialize a Example object from a json dictionary.""" + args = {} + if (text := _dict.get('text')) is not None: + args['text'] = text + else: + raise ValueError( + 'Required property \'text\' not present in Example JSON') + if (mentions := _dict.get('mentions')) is not None: + args['mentions'] = [Mention.from_dict(v) for v in mentions] + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Example object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'mentions') and self.mentions is not None: + mentions_list = [] + for v in self.mentions: + if isinstance(v, dict): + mentions_list.append(v) + else: + mentions_list.append(v.to_dict()) + _dict['mentions'] = mentions_list + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Example object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Example') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Example') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ExampleCollection: + """ + ExampleCollection. + + :param List[Example] examples: An array of objects describing the examples + defined for the intent. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). + """ + + def __init__( + self, + examples: List['Example'], + pagination: 'Pagination', + ) -> None: + """ + Initialize a ExampleCollection object. + + :param List[Example] examples: An array of objects describing the examples + defined for the intent. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). 
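# Illustrative sketch (not part of the generated code above): a training example
# that tags a contextual entity mention. The payload is hypothetical and assumes
# the Mention model (defined elsewhere in this module) takes 'entity' and
# 'location' fields.
from ibm_watson.assistant_v1 import Example

example = Example.from_dict({
    'text': 'I would like a large pizza',
    'mentions': [{'entity': 'size', 'location': [15, 20]}],
})
print(example.to_dict()['mentions'])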
+ """ + self.examples = examples + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ExampleCollection': + """Initialize a ExampleCollection object from a json dictionary.""" + args = {} + if (examples := _dict.get('examples')) is not None: + args['examples'] = [Example.from_dict(v) for v in examples] + else: + raise ValueError( + 'Required property \'examples\' not present in ExampleCollection JSON' + ) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in ExampleCollection JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ExampleCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'examples') and self.examples is not None: + examples_list = [] + for v in self.examples: + if isinstance(v, dict): + examples_list.append(v) + else: + examples_list.append(v.to_dict()) + _dict['examples'] = examples_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ExampleCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ExampleCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ExampleCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Intent: + """ + Intent. + + :param str intent: The name of the intent. This string must conform to the + following restrictions: + - It can contain only Unicode alphanumeric, underscore, hyphen, and dot + characters. + - It cannot begin with the reserved prefix `sys-`. + :param str description: (optional) The description of the intent. This string + cannot contain carriage return, newline, or tab characters. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + :param List[Example] examples: (optional) An array of user input examples for + the intent. + """ + + def __init__( + self, + intent: str, + *, + description: Optional[str] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + examples: Optional[List['Example']] = None, + ) -> None: + """ + Initialize a Intent object. + + :param str intent: The name of the intent. This string must conform to the + following restrictions: + - It can contain only Unicode alphanumeric, underscore, hyphen, and dot + characters. + - It cannot begin with the reserved prefix `sys-`. + :param str description: (optional) The description of the intent. This + string cannot contain carriage return, newline, or tab characters. + :param List[Example] examples: (optional) An array of user input examples + for the intent. 
+ """ + self.intent = intent + self.description = description + self.created = created + self.updated = updated + self.examples = examples + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Intent': + """Initialize a Intent object from a json dictionary.""" + args = {} + if (intent := _dict.get('intent')) is not None: + args['intent'] = intent + else: + raise ValueError( + 'Required property \'intent\' not present in Intent JSON') + if (description := _dict.get('description')) is not None: + args['description'] = description + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + if (examples := _dict.get('examples')) is not None: + args['examples'] = [Example.from_dict(v) for v in examples] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Intent object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'intent') and self.intent is not None: + _dict['intent'] = self.intent + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + if hasattr(self, 'examples') and self.examples is not None: + examples_list = [] + for v in self.examples: + if isinstance(v, dict): + examples_list.append(v) + else: + examples_list.append(v.to_dict()) + _dict['examples'] = examples_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Intent object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Intent') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Intent') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class IntentCollection: + """ + IntentCollection. + + :param List[Intent] intents: An array of objects describing the intents defined + for the workspace. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). + """ + + def __init__( + self, + intents: List['Intent'], + pagination: 'Pagination', + ) -> None: + """ + Initialize a IntentCollection object. + + :param List[Intent] intents: An array of objects describing the intents + defined for the workspace. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). 
+ """ + self.intents = intents + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'IntentCollection': + """Initialize a IntentCollection object from a json dictionary.""" + args = {} + if (intents := _dict.get('intents')) is not None: + args['intents'] = [Intent.from_dict(v) for v in intents] + else: + raise ValueError( + 'Required property \'intents\' not present in IntentCollection JSON' + ) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in IntentCollection JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a IntentCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this IntentCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'IntentCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'IntentCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Log: + """ + Log. + + :param MessageRequest request: A request sent to the workspace, including the + user input and context. + :param MessageResponse response: The response sent by the workspace, including + the output text, detected intents and entities, and context. + :param str log_id: A unique identifier for the logged event. + :param str request_timestamp: The timestamp for receipt of the message. + :param str response_timestamp: The timestamp for the system response to the + message. + :param str workspace_id: The unique identifier of the workspace where the + request was made. + :param str language: The language of the workspace where the message request was + made. + """ + + def __init__( + self, + request: 'MessageRequest', + response: 'MessageResponse', + log_id: str, + request_timestamp: str, + response_timestamp: str, + workspace_id: str, + language: str, + ) -> None: + """ + Initialize a Log object. + + :param MessageRequest request: A request sent to the workspace, including + the user input and context. + :param MessageResponse response: The response sent by the workspace, + including the output text, detected intents and entities, and context. + :param str log_id: A unique identifier for the logged event. + :param str request_timestamp: The timestamp for receipt of the message. + :param str response_timestamp: The timestamp for the system response to the + message. + :param str workspace_id: The unique identifier of the workspace where the + request was made. 
+ :param str language: The language of the workspace where the message + request was made. + """ + self.request = request + self.response = response + self.log_id = log_id + self.request_timestamp = request_timestamp + self.response_timestamp = response_timestamp + self.workspace_id = workspace_id + self.language = language + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Log': + """Initialize a Log object from a json dictionary.""" + args = {} + if (request := _dict.get('request')) is not None: + args['request'] = MessageRequest.from_dict(request) + else: + raise ValueError( + 'Required property \'request\' not present in Log JSON') + if (response := _dict.get('response')) is not None: + args['response'] = MessageResponse.from_dict(response) + else: + raise ValueError( + 'Required property \'response\' not present in Log JSON') + if (log_id := _dict.get('log_id')) is not None: + args['log_id'] = log_id + else: + raise ValueError( + 'Required property \'log_id\' not present in Log JSON') + if (request_timestamp := _dict.get('request_timestamp')) is not None: + args['request_timestamp'] = request_timestamp + else: + raise ValueError( + 'Required property \'request_timestamp\' not present in Log JSON' + ) + if (response_timestamp := _dict.get('response_timestamp')) is not None: + args['response_timestamp'] = response_timestamp + else: + raise ValueError( + 'Required property \'response_timestamp\' not present in Log JSON' + ) + if (workspace_id := _dict.get('workspace_id')) is not None: + args['workspace_id'] = workspace_id + else: + raise ValueError( + 'Required property \'workspace_id\' not present in Log JSON') + if (language := _dict.get('language')) is not None: + args['language'] = language + else: + raise ValueError( + 'Required property \'language\' not present in Log JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Log object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'request') and self.request is not None: + if isinstance(self.request, dict): + _dict['request'] = self.request + else: + _dict['request'] = self.request.to_dict() + if hasattr(self, 'response') and self.response is not None: + if isinstance(self.response, dict): + _dict['response'] = self.response + else: + _dict['response'] = self.response.to_dict() + if hasattr(self, 'log_id') and self.log_id is not None: + _dict['log_id'] = self.log_id + if hasattr(self, + 'request_timestamp') and self.request_timestamp is not None: + _dict['request_timestamp'] = self.request_timestamp + if hasattr( + self, + 'response_timestamp') and self.response_timestamp is not None: + _dict['response_timestamp'] = self.response_timestamp + if hasattr(self, 'workspace_id') and self.workspace_id is not None: + _dict['workspace_id'] = self.workspace_id + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Log object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Log') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Log') -> bool: + """Return `true` 
when self and other are not equal, false otherwise.""" + return not self == other + + +class LogCollection: + """ + LogCollection. + + :param List[Log] logs: An array of objects describing log events. + :param LogPagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). + """ + + def __init__( + self, + logs: List['Log'], + pagination: 'LogPagination', + ) -> None: + """ + Initialize a LogCollection object. + + :param List[Log] logs: An array of objects describing log events. + :param LogPagination pagination: The pagination data for the returned + objects. For more information about using pagination, see + [Pagination](#pagination). + """ + self.logs = logs + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogCollection': + """Initialize a LogCollection object from a json dictionary.""" + args = {} + if (logs := _dict.get('logs')) is not None: + args['logs'] = [Log.from_dict(v) for v in logs] + else: + raise ValueError( + 'Required property \'logs\' not present in LogCollection JSON') + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = LogPagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in LogCollection JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'logs') and self.logs is not None: + logs_list = [] + for v in self.logs: + if isinstance(v, dict): + logs_list.append(v) + else: + logs_list.append(v.to_dict()) + _dict['logs'] = logs_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LogMessage: + """ + Log message details. + + :param str level: The severity of the log message. + :param str msg: The text of the log message. + :param str code: A code that indicates the category to which the error message + belongs. + :param LogMessageSource source: (optional) An object that identifies the dialog + element that generated the error message. + """ + + def __init__( + self, + level: str, + msg: str, + code: str, + *, + source: Optional['LogMessageSource'] = None, + ) -> None: + """ + Initialize a LogMessage object. + + :param str level: The severity of the log message. + :param str msg: The text of the log message. + :param str code: A code that indicates the category to which the error + message belongs. + :param LogMessageSource source: (optional) An object that identifies the + dialog element that generated the error message. 
+ """ + self.level = level + self.msg = msg + self.code = code + self.source = source + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogMessage': + """Initialize a LogMessage object from a json dictionary.""" + args = {} + if (level := _dict.get('level')) is not None: + args['level'] = level + else: + raise ValueError( + 'Required property \'level\' not present in LogMessage JSON') + if (msg := _dict.get('msg')) is not None: + args['msg'] = msg + else: + raise ValueError( + 'Required property \'msg\' not present in LogMessage JSON') + if (code := _dict.get('code')) is not None: + args['code'] = code + else: + raise ValueError( + 'Required property \'code\' not present in LogMessage JSON') + if (source := _dict.get('source')) is not None: + args['source'] = LogMessageSource.from_dict(source) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogMessage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'level') and self.level is not None: + _dict['level'] = self.level + if hasattr(self, 'msg') and self.msg is not None: + _dict['msg'] = self.msg + if hasattr(self, 'code') and self.code is not None: + _dict['code'] = self.code + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogMessage object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogMessage') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogMessage') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class LevelEnum(str, Enum): + """ + The severity of the log message. + """ + + INFO = 'info' + ERROR = 'error' + WARN = 'warn' + + +class LogMessageSource: + """ + An object that identifies the dialog element that generated the error message. + + :param str type: (optional) A string that indicates the type of dialog element + that generated the error message. + :param str dialog_node: (optional) The unique identifier of the dialog node that + generated the error message. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + dialog_node: Optional[str] = None, + ) -> None: + """ + Initialize a LogMessageSource object. + + :param str type: (optional) A string that indicates the type of dialog + element that generated the error message. + :param str dialog_node: (optional) The unique identifier of the dialog node + that generated the error message. 
+ """ + self.type = type + self.dialog_node = dialog_node + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogMessageSource': + """Initialize a LogMessageSource object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (dialog_node := _dict.get('dialog_node')) is not None: + args['dialog_node'] = dialog_node + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogMessageSource object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'dialog_node') and self.dialog_node is not None: + _dict['dialog_node'] = self.dialog_node + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogMessageSource object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogMessageSource') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogMessageSource') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + A string that indicates the type of dialog element that generated the error + message. + """ + + DIALOG_NODE = 'dialog_node' + + +class LogPagination: + """ + The pagination data for the returned objects. For more information about using + pagination, see [Pagination](#pagination). + + :param str next_url: (optional) The URL that will return the next page of + results, if any. + :param int matched: (optional) Reserved for future use. + :param str next_cursor: (optional) A token identifying the next page of results. + """ + + def __init__( + self, + *, + next_url: Optional[str] = None, + matched: Optional[int] = None, + next_cursor: Optional[str] = None, + ) -> None: + """ + Initialize a LogPagination object. + + :param str next_url: (optional) The URL that will return the next page of + results, if any. + :param int matched: (optional) Reserved for future use. + :param str next_cursor: (optional) A token identifying the next page of + results. 
+ """ + self.next_url = next_url + self.matched = matched + self.next_cursor = next_cursor + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogPagination': + """Initialize a LogPagination object from a json dictionary.""" + args = {} + if (next_url := _dict.get('next_url')) is not None: + args['next_url'] = next_url + if (matched := _dict.get('matched')) is not None: + args['matched'] = matched + if (next_cursor := _dict.get('next_cursor')) is not None: + args['next_cursor'] = next_cursor + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogPagination object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'next_url') and self.next_url is not None: + _dict['next_url'] = self.next_url + if hasattr(self, 'matched') and self.matched is not None: + _dict['matched'] = self.matched + if hasattr(self, 'next_cursor') and self.next_cursor is not None: + _dict['next_cursor'] = self.next_cursor + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogPagination object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogPagination') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogPagination') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Mention: + """ + A mention of a contextual entity. + + :param str entity: The name of the entity. + :param List[int] location: An array of zero-based character offsets that + indicate where the entity mentions begin and end in the input text. + """ + + def __init__( + self, + entity: str, + location: List[int], + ) -> None: + """ + Initialize a Mention object. + + :param str entity: The name of the entity. + :param List[int] location: An array of zero-based character offsets that + indicate where the entity mentions begin and end in the input text. 
+ """ + self.entity = entity + self.location = location + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Mention': + """Initialize a Mention object from a json dictionary.""" + args = {} + if (entity := _dict.get('entity')) is not None: + args['entity'] = entity + else: + raise ValueError( + 'Required property \'entity\' not present in Mention JSON') + if (location := _dict.get('location')) is not None: + args['location'] = location + else: + raise ValueError( + 'Required property \'location\' not present in Mention JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Mention object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'entity') and self.entity is not None: + _dict['entity'] = self.entity + if hasattr(self, 'location') and self.location is not None: + _dict['location'] = self.location + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Mention object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Mention') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Mention') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageContextMetadata: + """ + Metadata related to the message. + + :param str deployment: (optional) A label identifying the deployment + environment, used for filtering log data. This string cannot contain carriage + return, newline, or tab characters. + :param str user_id: (optional) A string value that identifies the user who is + interacting with the workspace. The client must provide a unique identifier for + each individual end user who accesses the application. For user-based plans, + this user ID is used to identify unique users for billing purposes. This string + cannot contain carriage return, newline, or tab characters. If no value is + specified in the input, **user_id** is automatically set to the value of + **context.conversation_id**. + **Note:** This property is the same as the **user_id** property at the root of + the message body. If **user_id** is specified in both locations in a message + request, the value specified at the root is used. + """ + + def __init__( + self, + *, + deployment: Optional[str] = None, + user_id: Optional[str] = None, + ) -> None: + """ + Initialize a MessageContextMetadata object. + + :param str deployment: (optional) A label identifying the deployment + environment, used for filtering log data. This string cannot contain + carriage return, newline, or tab characters. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the workspace. The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. If no value is specified in the input, **user_id** is + automatically set to the value of **context.conversation_id**. 
+ **Note:** This property is the same as the **user_id** property at the root + of the message body. If **user_id** is specified in both locations in a + message request, the value specified at the root is used. + """ + self.deployment = deployment + self.user_id = user_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageContextMetadata': + """Initialize a MessageContextMetadata object from a json dictionary.""" + args = {} + if (deployment := _dict.get('deployment')) is not None: + args['deployment'] = deployment + if (user_id := _dict.get('user_id')) is not None: + args['user_id'] = user_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageContextMetadata object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'deployment') and self.deployment is not None: + _dict['deployment'] = self.deployment + if hasattr(self, 'user_id') and self.user_id is not None: + _dict['user_id'] = self.user_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageContextMetadata object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageContextMetadata') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageContextMetadata') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageInput: + """ + An input object that includes the input text. + + :param str text: (optional) The text of the user input. This string cannot + contain carriage return, newline, or tab characters. + :param bool spelling_suggestions: (optional) Whether to use spelling correction + when processing the input. This property overrides the value of the + **spelling_suggestions** property in the workspace settings. + :param bool spelling_auto_correct: (optional) Whether to use autocorrection when + processing the input. If spelling correction is used and this property is + `false`, any suggested corrections are returned in the **suggested_text** + property of the message response. If this property is `true`, any corrections + are automatically applied to the user input, and the original text is returned + in the **original_text** property of the message response. This property + overrides the value of the **spelling_auto_correct** property in the workspace + settings. + :param str suggested_text: (optional) Any suggested corrections of the input + text. This property is returned only if spelling correction is enabled and + autocorrection is disabled. + :param str original_text: (optional) The original user input text. This property + is returned only if autocorrection is enabled and the user input was corrected. + + This type supports additional properties of type object. Any additional data included + with the message input. 
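
A minimal `MessageContextMetadata` sketch, assuming the same import path; the deployment label and user ID are placeholders:

```python
from ibm_watson.assistant_v1 import MessageContextMetadata  # assumed import path

metadata = MessageContextMetadata(
    deployment='production',   # label used when filtering log data
    user_id='customer-0001',   # unique per end user; used for user-based billing
)
print(metadata.to_dict())      # {'deployment': 'production', 'user_id': 'customer-0001'}
```
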
+ """ + + # The set of defined properties for the class + _properties = frozenset([ + 'text', 'spelling_suggestions', 'spelling_auto_correct', + 'suggested_text', 'original_text' + ]) + + def __init__( + self, + *, + text: Optional[str] = None, + spelling_suggestions: Optional[bool] = None, + spelling_auto_correct: Optional[bool] = None, + suggested_text: Optional[str] = None, + original_text: Optional[str] = None, + **kwargs: Optional[object], + ) -> None: + """ + Initialize a MessageInput object. + + :param str text: (optional) The text of the user input. This string cannot + contain carriage return, newline, or tab characters. + :param bool spelling_suggestions: (optional) Whether to use spelling + correction when processing the input. This property overrides the value of + the **spelling_suggestions** property in the workspace settings. + :param bool spelling_auto_correct: (optional) Whether to use autocorrection + when processing the input. If spelling correction is used and this property + is `false`, any suggested corrections are returned in the + **suggested_text** property of the message response. If this property is + `true`, any corrections are automatically applied to the user input, and + the original text is returned in the **original_text** property of the + message response. This property overrides the value of the + **spelling_auto_correct** property in the workspace settings. + :param object **kwargs: (optional) Any additional data included with the + message input. + """ + self.text = text + self.spelling_suggestions = spelling_suggestions + self.spelling_auto_correct = spelling_auto_correct + self.suggested_text = suggested_text + self.original_text = original_text + for k, v in kwargs.items(): + if k not in MessageInput._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. 
+ format(k)) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageInput': + """Initialize a MessageInput object from a json dictionary.""" + args = {} + if (text := _dict.get('text')) is not None: + args['text'] = text + if (spelling_suggestions := + _dict.get('spelling_suggestions')) is not None: + args['spelling_suggestions'] = spelling_suggestions + if (spelling_auto_correct := + _dict.get('spelling_auto_correct')) is not None: + args['spelling_auto_correct'] = spelling_auto_correct + if (suggested_text := _dict.get('suggested_text')) is not None: + args['suggested_text'] = suggested_text + if (original_text := _dict.get('original_text')) is not None: + args['original_text'] = original_text + for k, v in _dict.items(): + if k not in cls._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + args[k] = v + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageInput object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'spelling_suggestions' + ) and self.spelling_suggestions is not None: + _dict['spelling_suggestions'] = self.spelling_suggestions + if hasattr(self, 'spelling_auto_correct' + ) and self.spelling_auto_correct is not None: + _dict['spelling_auto_correct'] = self.spelling_auto_correct + if hasattr(self, 'suggested_text') and getattr( + self, 'suggested_text') is not None: + _dict['suggested_text'] = getattr(self, 'suggested_text') + if hasattr(self, 'original_text') and getattr( + self, 'original_text') is not None: + _dict['original_text'] = getattr(self, 'original_text') + for k in [ + _k for _k in vars(self).keys() + if _k not in MessageInput._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def get_properties(self) -> Dict: + """Return the additional properties from this instance of MessageInput in the form of a dict.""" + _dict = {} + for k in [ + _k for _k in vars(self).keys() + if _k not in MessageInput._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of additional properties in this instance of MessageInput""" + for k in [ + _k for _k in vars(self).keys() + if _k not in MessageInput._properties + ]: + delattr(self, k) + for k, v in _dict.items(): + if k not in MessageInput._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. 
+ format(k)) + + def __str__(self) -> str: + """Return a `str` version of this MessageInput object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageInput') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageInput') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageRequest: + """ + A request sent to the workspace, including the user input and context. + + :param MessageInput input: (optional) An input object that includes the input + text. + :param List[RuntimeIntent] intents: (optional) Intents to use when evaluating + the user input. Include intents from the previous response to continue using + those intents rather than trying to recognize intents in the new input. + :param List[RuntimeEntity] entities: (optional) Entities to use when evaluating + the message. Include entities from the previous response to continue using those + entities rather than detecting entities in the new input. + :param bool alternate_intents: (optional) Whether to return more than one + intent. A value of `true` indicates that all matching intents are returned. + :param Context context: (optional) State information for the conversation. To + maintain state, include the context from the previous response. + :param OutputData output: (optional) An output object that includes the response + to the user, the dialog nodes that were triggered, and messages from the log. + :param List[DialogNodeAction] actions: (optional) An array of objects describing + any actions requested by the dialog node. + :param str user_id: (optional) A string value that identifies the user who is + interacting with the workspace. The client must provide a unique identifier for + each individual end user who accesses the application. For user-based plans, + this user ID is used to identify unique users for billing purposes. This string + cannot contain carriage return, newline, or tab characters. If no value is + specified in the input, **user_id** is automatically set to the value of + **context.conversation_id**. + **Note:** This property is the same as the **user_id** property in the context + metadata. If **user_id** is specified in both locations in a message request, + the value specified at the root is used. + """ + + def __init__( + self, + *, + input: Optional['MessageInput'] = None, + intents: Optional[List['RuntimeIntent']] = None, + entities: Optional[List['RuntimeEntity']] = None, + alternate_intents: Optional[bool] = None, + context: Optional['Context'] = None, + output: Optional['OutputData'] = None, + actions: Optional[List['DialogNodeAction']] = None, + user_id: Optional[str] = None, + ) -> None: + """ + Initialize a MessageRequest object. + + :param MessageInput input: (optional) An input object that includes the + input text. + :param List[RuntimeIntent] intents: (optional) Intents to use when + evaluating the user input. Include intents from the previous response to + continue using those intents rather than trying to recognize intents in the + new input. + :param List[RuntimeEntity] entities: (optional) Entities to use when + evaluating the message. Include entities from the previous response to + continue using those entities rather than detecting entities in the new + input. 
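
Because `MessageInput` accepts arbitrary additional properties alongside its defined ones, here is a short sketch of that behaviour (the `custom_tag` key is purely illustrative):

```python
from ibm_watson.assistant_v1 import MessageInput  # assumed import path

# Keyword arguments outside the defined properties are stored as additional data
message_input = MessageInput(text='hello', custom_tag='smoke-test')
print(message_input.get_properties())  # {'custom_tag': 'smoke-test'}
print(message_input.to_dict())         # includes both 'text' and 'custom_tag'
```
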
+ :param bool alternate_intents: (optional) Whether to return more than one + intent. A value of `true` indicates that all matching intents are returned. + :param Context context: (optional) State information for the conversation. + To maintain state, include the context from the previous response. + :param OutputData output: (optional) An output object that includes the + response to the user, the dialog nodes that were triggered, and messages + from the log. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the workspace. The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. If no value is specified in the input, **user_id** is + automatically set to the value of **context.conversation_id**. + **Note:** This property is the same as the **user_id** property in the + context metadata. If **user_id** is specified in both locations in a + message request, the value specified at the root is used. + """ + self.input = input + self.intents = intents + self.entities = entities + self.alternate_intents = alternate_intents + self.context = context + self.output = output + self.actions = actions + self.user_id = user_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageRequest': + """Initialize a MessageRequest object from a json dictionary.""" + args = {} + if (input := _dict.get('input')) is not None: + args['input'] = MessageInput.from_dict(input) + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + if (alternate_intents := _dict.get('alternate_intents')) is not None: + args['alternate_intents'] = alternate_intents + if (context := _dict.get('context')) is not None: + args['context'] = Context.from_dict(context) + if (output := _dict.get('output')) is not None: + args['output'] = OutputData.from_dict(output) + if (actions := _dict.get('actions')) is not None: + args['actions'] = [DialogNodeAction.from_dict(v) for v in actions] + if (user_id := _dict.get('user_id')) is not None: + args['user_id'] = user_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageRequest object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'input') and self.input is not None: + if isinstance(self.input, dict): + _dict['input'] = self.input + else: + _dict['input'] = self.input.to_dict() + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, + 'alternate_intents') and self.alternate_intents is not None: + _dict['alternate_intents'] = self.alternate_intents + if hasattr(self, 'context') and self.context is not None: + if isinstance(self.context, dict): + 
_dict['context'] = self.context + else: + _dict['context'] = self.context.to_dict() + if hasattr(self, 'output') and self.output is not None: + if isinstance(self.output, dict): + _dict['output'] = self.output + else: + _dict['output'] = self.output.to_dict() + if hasattr(self, 'actions') and getattr(self, 'actions') is not None: + actions_list = [] + for v in getattr(self, 'actions'): + if isinstance(v, dict): + actions_list.append(v) + else: + actions_list.append(v.to_dict()) + _dict['actions'] = actions_list + if hasattr(self, 'user_id') and self.user_id is not None: + _dict['user_id'] = self.user_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageRequest object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageRequest') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageRequest') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageResponse: + """ + The response sent by the workspace, including the output text, detected intents and + entities, and context. + + :param MessageInput input: An input object that includes the input text. + :param List[RuntimeIntent] intents: An array of intents recognized in the user + input, sorted in descending order of confidence. + :param List[RuntimeEntity] entities: An array of entities identified in the user + input. + :param bool alternate_intents: (optional) Whether to return more than one + intent. A value of `true` indicates that all matching intents are returned. + :param Context context: State information for the conversation. To maintain + state, include the context from the previous response. + :param OutputData output: An output object that includes the response to the + user, the dialog nodes that were triggered, and messages from the log. + :param List[DialogNodeAction] actions: (optional) An array of objects describing + any actions requested by the dialog node. + :param str user_id: A string value that identifies the user who is interacting + with the workspace. The client must provide a unique identifier for each + individual end user who accesses the application. For user-based plans, this + user ID is used to identify unique users for billing purposes. This string + cannot contain carriage return, newline, or tab characters. If no value is + specified in the input, **user_id** is automatically set to the value of + **context.conversation_id**. + **Note:** This property is the same as the **user_id** property in the context + metadata. If **user_id** is specified in both locations in a message request, + the value specified at the root is used. + """ + + def __init__( + self, + input: 'MessageInput', + intents: List['RuntimeIntent'], + entities: List['RuntimeEntity'], + context: 'Context', + output: 'OutputData', + user_id: str, + *, + alternate_intents: Optional[bool] = None, + actions: Optional[List['DialogNodeAction']] = None, + ) -> None: + """ + Initialize a MessageResponse object. + + :param MessageInput input: An input object that includes the input text. + :param List[RuntimeIntent] intents: An array of intents recognized in the + user input, sorted in descending order of confidence. 
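
A hedged sketch of assembling a `MessageRequest` body from the models above; the user utterance is invented, and `to_dict()` simply produces the JSON payload for a message call:

```python
from ibm_watson.assistant_v1 import MessageInput, MessageRequest  # assumed import path

request = MessageRequest(
    input=MessageInput(text='What are your opening hours?'),
    alternate_intents=True,  # ask the workspace to return every matching intent
)
print(request.to_dict())     # {'input': {'text': ...}, 'alternate_intents': True}
```
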
+ :param List[RuntimeEntity] entities: An array of entities identified in the + user input. + :param Context context: State information for the conversation. To maintain + state, include the context from the previous response. + :param OutputData output: An output object that includes the response to + the user, the dialog nodes that were triggered, and messages from the log. + :param str user_id: A string value that identifies the user who is + interacting with the workspace. The client must provide a unique identifier + for each individual end user who accesses the application. For user-based + plans, this user ID is used to identify unique users for billing purposes. + This string cannot contain carriage return, newline, or tab characters. If + no value is specified in the input, **user_id** is automatically set to the + value of **context.conversation_id**. + **Note:** This property is the same as the **user_id** property in the + context metadata. If **user_id** is specified in both locations in a + message request, the value specified at the root is used. + :param bool alternate_intents: (optional) Whether to return more than one + intent. A value of `true` indicates that all matching intents are returned. + """ + self.input = input + self.intents = intents + self.entities = entities + self.alternate_intents = alternate_intents + self.context = context + self.output = output + self.actions = actions + self.user_id = user_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageResponse': + """Initialize a MessageResponse object from a json dictionary.""" + args = {} + if (input := _dict.get('input')) is not None: + args['input'] = MessageInput.from_dict(input) + else: + raise ValueError( + 'Required property \'input\' not present in MessageResponse JSON' + ) + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + else: + raise ValueError( + 'Required property \'intents\' not present in MessageResponse JSON' + ) + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + else: + raise ValueError( + 'Required property \'entities\' not present in MessageResponse JSON' + ) + if (alternate_intents := _dict.get('alternate_intents')) is not None: + args['alternate_intents'] = alternate_intents + if (context := _dict.get('context')) is not None: + args['context'] = Context.from_dict(context) + else: + raise ValueError( + 'Required property \'context\' not present in MessageResponse JSON' + ) + if (output := _dict.get('output')) is not None: + args['output'] = OutputData.from_dict(output) + else: + raise ValueError( + 'Required property \'output\' not present in MessageResponse JSON' + ) + if (actions := _dict.get('actions')) is not None: + args['actions'] = [DialogNodeAction.from_dict(v) for v in actions] + if (user_id := _dict.get('user_id')) is not None: + args['user_id'] = user_id + else: + raise ValueError( + 'Required property \'user_id\' not present in MessageResponse JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'input') and self.input is not None: + if isinstance(self.input, dict): + _dict['input'] = self.input + else: + _dict['input'] = self.input.to_dict() + if hasattr(self, 'intents') and self.intents 
is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, + 'alternate_intents') and self.alternate_intents is not None: + _dict['alternate_intents'] = self.alternate_intents + if hasattr(self, 'context') and self.context is not None: + if isinstance(self.context, dict): + _dict['context'] = self.context + else: + _dict['context'] = self.context.to_dict() + if hasattr(self, 'output') and self.output is not None: + if isinstance(self.output, dict): + _dict['output'] = self.output + else: + _dict['output'] = self.output.to_dict() + if hasattr(self, 'actions') and getattr(self, 'actions') is not None: + actions_list = [] + for v in getattr(self, 'actions'): + if isinstance(v, dict): + actions_list.append(v) + else: + actions_list.append(v.to_dict()) + _dict['actions'] = actions_list + if hasattr(self, 'user_id') and self.user_id is not None: + _dict['user_id'] = self.user_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class OutputData: + """ + An output object that includes the response to the user, the dialog nodes that were + triggered, and messages from the log. + + :param List[str] nodes_visited: (optional) An array of the nodes that were + triggered to create the response, in the order in which they were visited. This + information is useful for debugging and for tracing the path taken through the + node tree. + :param List[DialogNodeVisitedDetails] nodes_visited_details: (optional) An array + of objects containing detailed diagnostic information about the nodes that were + triggered during processing of the input message. Included only if + **nodes_visited_details** is set to `true` in the message request. + :param List[LogMessage] log_messages: An array of up to 50 messages logged with + the request. + :param List[RuntimeResponseGeneric] generic: (optional) Output intended for any + channel. It is the responsibility of the client application to implement the + supported response types. + + This type supports additional properties of type object. Any additional data included + with the output. + """ + + # The set of defined properties for the class + _properties = frozenset( + ['nodes_visited', 'nodes_visited_details', 'log_messages', 'generic']) + + def __init__( + self, + log_messages: List['LogMessage'], + *, + nodes_visited: Optional[List[str]] = None, + nodes_visited_details: Optional[ + List['DialogNodeVisitedDetails']] = None, + generic: Optional[List['RuntimeResponseGeneric']] = None, + **kwargs: Optional[object], + ) -> None: + """ + Initialize a OutputData object. 
+ + :param List[LogMessage] log_messages: An array of up to 50 messages logged + with the request. + :param List[str] nodes_visited: (optional) An array of the nodes that were + triggered to create the response, in the order in which they were visited. + This information is useful for debugging and for tracing the path taken + through the node tree. + :param List[DialogNodeVisitedDetails] nodes_visited_details: (optional) An + array of objects containing detailed diagnostic information about the nodes + that were triggered during processing of the input message. Included only + if **nodes_visited_details** is set to `true` in the message request. + :param List[RuntimeResponseGeneric] generic: (optional) Output intended for + any channel. It is the responsibility of the client application to + implement the supported response types. + :param object **kwargs: (optional) Any additional data included with the + output. + """ + self.nodes_visited = nodes_visited + self.nodes_visited_details = nodes_visited_details + self.log_messages = log_messages + self.generic = generic + for k, v in kwargs.items(): + if k not in OutputData._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. + format(k)) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'OutputData': + """Initialize a OutputData object from a json dictionary.""" + args = {} + if (nodes_visited := _dict.get('nodes_visited')) is not None: + args['nodes_visited'] = nodes_visited + if (nodes_visited_details := + _dict.get('nodes_visited_details')) is not None: + args['nodes_visited_details'] = [ + DialogNodeVisitedDetails.from_dict(v) + for v in nodes_visited_details + ] + if (log_messages := _dict.get('log_messages')) is not None: + args['log_messages'] = [ + LogMessage.from_dict(v) for v in log_messages + ] + else: + raise ValueError( + 'Required property \'log_messages\' not present in OutputData JSON' + ) + if (generic := _dict.get('generic')) is not None: + args['generic'] = [ + RuntimeResponseGeneric.from_dict(v) for v in generic + ] + for k, v in _dict.items(): + if k not in cls._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + args[k] = v + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a OutputData object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'nodes_visited') and self.nodes_visited is not None: + _dict['nodes_visited'] = self.nodes_visited + if hasattr(self, 'nodes_visited_details' + ) and self.nodes_visited_details is not None: + nodes_visited_details_list = [] + for v in self.nodes_visited_details: + if isinstance(v, dict): + nodes_visited_details_list.append(v) + else: + nodes_visited_details_list.append(v.to_dict()) + _dict['nodes_visited_details'] = nodes_visited_details_list + if hasattr(self, 'log_messages') and self.log_messages is not None: + log_messages_list = [] + for v in self.log_messages: + if isinstance(v, dict): + log_messages_list.append(v) + else: + log_messages_list.append(v.to_dict()) + _dict['log_messages'] = log_messages_list + if hasattr(self, 'generic') and self.generic is not None: + generic_list = [] + for v in self.generic: + if isinstance(v, 
dict): + generic_list.append(v) + else: + generic_list.append(v.to_dict()) + _dict['generic'] = generic_list + for k in [ + _k for _k in vars(self).keys() + if _k not in OutputData._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def get_properties(self) -> Dict: + """Return the additional properties from this instance of OutputData in the form of a dict.""" + _dict = {} + for k in [ + _k for _k in vars(self).keys() + if _k not in OutputData._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of additional properties in this instance of OutputData""" + for k in [ + _k for _k in vars(self).keys() + if _k not in OutputData._properties + ]: + delattr(self, k) + for k, v in _dict.items(): + if k not in OutputData._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. + format(k)) + + def __str__(self) -> str: + """Return a `str` version of this OutputData object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'OutputData') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'OutputData') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Pagination: + """ + The pagination data for the returned objects. For more information about using + pagination, see [Pagination](#pagination). + + :param str refresh_url: The URL that will return the same page of results. + :param str next_url: (optional) The URL that will return the next page of + results. + :param int total: (optional) The total number of objects that satisfy the + request. This total includes all results, not just those included in the current + page. + :param int matched: (optional) Reserved for future use. + :param str refresh_cursor: (optional) A token identifying the current page of + results. + :param str next_cursor: (optional) A token identifying the next page of results. + """ + + def __init__( + self, + refresh_url: str, + *, + next_url: Optional[str] = None, + total: Optional[int] = None, + matched: Optional[int] = None, + refresh_cursor: Optional[str] = None, + next_cursor: Optional[str] = None, + ) -> None: + """ + Initialize a Pagination object. + + :param str refresh_url: The URL that will return the same page of results. + :param str next_url: (optional) The URL that will return the next page of + results. + :param int total: (optional) The total number of objects that satisfy the + request. This total includes all results, not just those included in the + current page. + :param int matched: (optional) Reserved for future use. + :param str refresh_cursor: (optional) A token identifying the current page + of results. + :param str next_cursor: (optional) A token identifying the next page of + results. 
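
`OutputData` also supports additional properties, and `log_messages` is its only required key; a sketch with invented response text and node IDs:

```python
from ibm_watson.assistant_v1 import OutputData  # assumed import path

output = OutputData.from_dict({
    'log_messages': [],                         # required, may be empty
    'nodes_visited': ['node_2_1467221909631'],  # hypothetical node ID
    'text': ['Hello! How can I help you?'],     # kept as an additional property
})
print(output.to_dict()['text'])
# Omitting 'log_messages' would make from_dict raise ValueError
```
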
+ """ + self.refresh_url = refresh_url + self.next_url = next_url + self.total = total + self.matched = matched + self.refresh_cursor = refresh_cursor + self.next_cursor = next_cursor + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Pagination': + """Initialize a Pagination object from a json dictionary.""" + args = {} + if (refresh_url := _dict.get('refresh_url')) is not None: + args['refresh_url'] = refresh_url + else: + raise ValueError( + 'Required property \'refresh_url\' not present in Pagination JSON' + ) + if (next_url := _dict.get('next_url')) is not None: + args['next_url'] = next_url + if (total := _dict.get('total')) is not None: + args['total'] = total + if (matched := _dict.get('matched')) is not None: + args['matched'] = matched + if (refresh_cursor := _dict.get('refresh_cursor')) is not None: + args['refresh_cursor'] = refresh_cursor + if (next_cursor := _dict.get('next_cursor')) is not None: + args['next_cursor'] = next_cursor + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Pagination object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'refresh_url') and self.refresh_url is not None: + _dict['refresh_url'] = self.refresh_url + if hasattr(self, 'next_url') and self.next_url is not None: + _dict['next_url'] = self.next_url + if hasattr(self, 'total') and self.total is not None: + _dict['total'] = self.total + if hasattr(self, 'matched') and self.matched is not None: + _dict['matched'] = self.matched + if hasattr(self, 'refresh_cursor') and self.refresh_cursor is not None: + _dict['refresh_cursor'] = self.refresh_cursor + if hasattr(self, 'next_cursor') and self.next_cursor is not None: + _dict['next_cursor'] = self.next_cursor + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Pagination object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Pagination') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Pagination') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ResponseGenericChannel: + """ + ResponseGenericChannel. + + :param str channel: (optional) A channel for which the response is intended. + **Note:** On IBM Cloud Pak for Data, only `chat` is supported. + """ + + def __init__( + self, + *, + channel: Optional[str] = None, + ) -> None: + """ + Initialize a ResponseGenericChannel object. + + :param str channel: (optional) A channel for which the response is + intended. + **Note:** On IBM Cloud Pak for Data, only `chat` is supported. 
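# A quick sketch of restricting a generic response to a single channel; the
# nested ChannelEnum defined just below lists the values the service accepts.
slack_channel = ResponseGenericChannel(channel='slack')
assert slack_channel.to_dict() == {'channel': 'slack'}
assert slack_channel.channel == ResponseGenericChannel.ChannelEnum.SLACK.value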
+ """ + self.channel = channel + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ResponseGenericChannel': + """Initialize a ResponseGenericChannel object from a json dictionary.""" + args = {} + if (channel := _dict.get('channel')) is not None: + args['channel'] = channel + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ResponseGenericChannel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'channel') and self.channel is not None: + _dict['channel'] = self.channel + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ResponseGenericChannel object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ResponseGenericChannel') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ResponseGenericChannel') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ChannelEnum(str, Enum): + """ + A channel for which the response is intended. + **Note:** On IBM Cloud Pak for Data, only `chat` is supported. + """ + + CHAT = 'chat' + FACEBOOK = 'facebook' + INTERCOM = 'intercom' + SLACK = 'slack' + TEXT_MESSAGING = 'text_messaging' + VOICE_TELEPHONY = 'voice_telephony' + WHATSAPP = 'whatsapp' + + + class RuntimeEntity: + """ + A term from the request that was identified as an entity. + + :param str entity: An entity detected in the input. + :param List[int] location: (optional) An array of zero-based character offsets + that indicate where the detected entity values begin and end in the input text. + :param str value: The entity value that was recognized in the user input. + :param float confidence: (optional) A decimal percentage that represents + confidence in the recognized entity. + :param List[CaptureGroup] groups: (optional) The recognized capture groups for + the entity, as defined by the entity pattern. + :param RuntimeEntityInterpretation interpretation: (optional) An object + containing detailed information about the entity recognized in the user input. + For more information about how system entities are interpreted, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-system-entities). + :param List[RuntimeEntityAlternative] alternatives: (optional) An array of + possible alternative values that the user might have intended instead of the + value returned in the **value** property. This property is returned only for + `@sys-time` and `@sys-date` entities when the user's input is ambiguous. + This property is included only if the new system entities are enabled for the + workspace. + :param RuntimeEntityRole role: (optional) An object describing the role played + by a system entity that specifies the beginning or end of a range recognized + in the user input. This property is included only if the new system entities are + enabled for the workspace.
+ """ + + def __init__( + self, + entity: str, + value: str, + *, + location: Optional[List[int]] = None, + confidence: Optional[float] = None, + groups: Optional[List['CaptureGroup']] = None, + interpretation: Optional['RuntimeEntityInterpretation'] = None, + alternatives: Optional[List['RuntimeEntityAlternative']] = None, + role: Optional['RuntimeEntityRole'] = None, + ) -> None: + """ + Initialize a RuntimeEntity object. + + :param str entity: An entity detected in the input. + :param str value: The entity value that was recognized in the user input. + :param List[int] location: (optional) An array of zero-based character + offsets that indicate where the detected entity values begin and end in the + input text. + :param float confidence: (optional) A decimal percentage that represents + confidence in the recognized entity. + :param List[CaptureGroup] groups: (optional) The recognized capture groups + for the entity, as defined by the entity pattern. + :param RuntimeEntityInterpretation interpretation: (optional) An object + containing detailed information about the entity recognized in the user + input. + For more information about how system entities are interpreted, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-system-entities). + :param List[RuntimeEntityAlternative] alternatives: (optional) An array of + possible alternative values that the user might have intended instead of + the value returned in the **value** property. This property is returned + only for `@sys-time` and `@sys-date` entities when the user's input is + ambiguous. + This property is included only if the new system entities are enabled for + the workspace. + :param RuntimeEntityRole role: (optional) An object describing the role + played by a system entity that specifies the beginning or end of a range + recognized in the user input. This property is included only if the new + system entities are enabled for the workspace.
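# A client-side sketch of the RuntimeEntity model documented above. The entity
# name, value, and offsets are made up; only `entity` and `value` are required.
entity = RuntimeEntity(
    entity='appliance',
    value='coffee maker',
    location=[14, 26],  # zero-based character offsets into the input text
    confidence=1.0,
)
assert RuntimeEntity.from_dict(entity.to_dict()) == entity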
+ """ + self.entity = entity + self.location = location + self.value = value + self.confidence = confidence + self.groups = groups + self.interpretation = interpretation + self.alternatives = alternatives + self.role = role + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeEntity': + """Initialize a RuntimeEntity object from a json dictionary.""" + args = {} + if (entity := _dict.get('entity')) is not None: + args['entity'] = entity + else: + raise ValueError( + 'Required property \'entity\' not present in RuntimeEntity JSON' + ) + if (location := _dict.get('location')) is not None: + args['location'] = location + if (value := _dict.get('value')) is not None: + args['value'] = value + else: + raise ValueError( + 'Required property \'value\' not present in RuntimeEntity JSON') + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + if (groups := _dict.get('groups')) is not None: + args['groups'] = [CaptureGroup.from_dict(v) for v in groups] + if (interpretation := _dict.get('interpretation')) is not None: + args['interpretation'] = RuntimeEntityInterpretation.from_dict( + interpretation) + if (alternatives := _dict.get('alternatives')) is not None: + args['alternatives'] = [ + RuntimeEntityAlternative.from_dict(v) for v in alternatives + ] + if (role := _dict.get('role')) is not None: + args['role'] = RuntimeEntityRole.from_dict(role) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeEntity object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'entity') and self.entity is not None: + _dict['entity'] = self.entity + if hasattr(self, 'location') and self.location is not None: + _dict['location'] = self.location + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + if hasattr(self, 'groups') and self.groups is not None: + groups_list = [] + for v in self.groups: + if isinstance(v, dict): + groups_list.append(v) + else: + groups_list.append(v.to_dict()) + _dict['groups'] = groups_list + if hasattr(self, 'interpretation') and self.interpretation is not None: + if isinstance(self.interpretation, dict): + _dict['interpretation'] = self.interpretation + else: + _dict['interpretation'] = self.interpretation.to_dict() + if hasattr(self, 'alternatives') and self.alternatives is not None: + alternatives_list = [] + for v in self.alternatives: + if isinstance(v, dict): + alternatives_list.append(v) + else: + alternatives_list.append(v.to_dict()) + _dict['alternatives'] = alternatives_list + if hasattr(self, 'role') and self.role is not None: + if isinstance(self.role, dict): + _dict['role'] = self.role + else: + _dict['role'] = self.role.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeEntity object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RuntimeEntity') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RuntimeEntity') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" 
+ return not self == other + + +class RuntimeEntityAlternative: + """ + An alternative value for the recognized entity. + + :param str value: (optional) The entity value that was recognized in the user + input. + :param float confidence: (optional) A decimal percentage that represents + confidence in the recognized entity. + """ + + def __init__( + self, + *, + value: Optional[str] = None, + confidence: Optional[float] = None, + ) -> None: + """ + Initialize a RuntimeEntityAlternative object. + + :param str value: (optional) The entity value that was recognized in the + user input. + :param float confidence: (optional) A decimal percentage that represents + confidence in the recognized entity. + """ + self.value = value + self.confidence = confidence + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeEntityAlternative': + """Initialize a RuntimeEntityAlternative object from a json dictionary.""" + args = {} + if (value := _dict.get('value')) is not None: + args['value'] = value + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeEntityAlternative object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeEntityAlternative object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RuntimeEntityAlternative') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RuntimeEntityAlternative') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class RuntimeEntityInterpretation: + """ + RuntimeEntityInterpretation. + + :param str calendar_type: (optional) The calendar used to represent a recognized + date (for example, `Gregorian`). + :param str datetime_link: (optional) A unique identifier used to associate a + recognized time and date. If the user input contains a date and time that are + mentioned together (for example, `Today at 5`, the same **datetime_link** value + is returned for both the `@sys-date` and `@sys-time` entities). + :param str festival: (optional) A locale-specific holiday name (such as + `thanksgiving` or `christmas`). This property is included when a `@sys-date` + entity is recognized based on a holiday name in the user input. + :param str granularity: (optional) The precision or duration of a time range + specified by a recognized `@sys-time` or `@sys-date` entity. + :param str range_link: (optional) A unique identifier used to associate multiple + recognized `@sys-date`, `@sys-time`, or `@sys-number` entities that are + recognized as a range of values in the user's input (for example, `from July 4 + until July 14` or `from 20 to 25`). 
+ :param str range_modifier: (optional) The word in the user input that indicates + that a `sys-date` or `sys-time` entity is part of an implied range where only + one date or time is specified (for example, `since` or `until`). + :param float relative_day: (optional) A recognized mention of a relative day, + represented numerically as an offset from the current date (for example, `-1` + for `yesterday` or `10` for `in ten days`). + :param float relative_month: (optional) A recognized mention of a relative + month, represented numerically as an offset from the current month (for example, + `1` for `next month` or `-3` for `three months ago`). + :param float relative_week: (optional) A recognized mention of a relative week, + represented numerically as an offset from the current week (for example, `2` for + `in two weeks` or `-1` for `last week). + :param float relative_weekend: (optional) A recognized mention of a relative + date range for a weekend, represented numerically as an offset from the current + weekend (for example, `0` for `this weekend` or `-1` for `last weekend`). + :param float relative_year: (optional) A recognized mention of a relative year, + represented numerically as an offset from the current year (for example, `1` for + `next year` or `-5` for `five years ago`). + :param float specific_day: (optional) A recognized mention of a specific date, + represented numerically as the date within the month (for example, `30` for + `June 30`.). + :param str specific_day_of_week: (optional) A recognized mention of a specific + day of the week as a lowercase string (for example, `monday`). + :param float specific_month: (optional) A recognized mention of a specific + month, represented numerically (for example, `7` for `July`). + :param float specific_quarter: (optional) A recognized mention of a specific + quarter, represented numerically (for example, `3` for `the third quarter`). + :param float specific_year: (optional) A recognized mention of a specific year + (for example, `2016`). + :param float numeric_value: (optional) A recognized numeric value, represented + as an integer or double. + :param str subtype: (optional) The type of numeric value recognized in the user + input (`integer` or `rational`). + :param str part_of_day: (optional) A recognized term for a time that was + mentioned as a part of the day in the user's input (for example, `morning` or + `afternoon`). + :param float relative_hour: (optional) A recognized mention of a relative hour, + represented numerically as an offset from the current hour (for example, `3` for + `in three hours` or `-1` for `an hour ago`). + :param float relative_minute: (optional) A recognized mention of a relative + time, represented numerically as an offset in minutes from the current time (for + example, `5` for `in five minutes` or `-15` for `fifteen minutes ago`). + :param float relative_second: (optional) A recognized mention of a relative + time, represented numerically as an offset in seconds from the current time (for + example, `10` for `in ten seconds` or `-30` for `thirty seconds ago`). + :param float specific_hour: (optional) A recognized specific hour mentioned as + part of a time value (for example, `10` for `10:15 AM`.). + :param float specific_minute: (optional) A recognized specific minute mentioned + as part of a time value (for example, `15` for `10:15 AM`.). + :param float specific_second: (optional) A recognized specific second mentioned + as part of a time value (for example, `30` for `10:15:30 AM`.). 
+ :param str timezone: (optional) A recognized time zone mentioned as part of a + time value (for example, `EST`). + """ + + def __init__( + self, + *, + calendar_type: Optional[str] = None, + datetime_link: Optional[str] = None, + festival: Optional[str] = None, + granularity: Optional[str] = None, + range_link: Optional[str] = None, + range_modifier: Optional[str] = None, + relative_day: Optional[float] = None, + relative_month: Optional[float] = None, + relative_week: Optional[float] = None, + relative_weekend: Optional[float] = None, + relative_year: Optional[float] = None, + specific_day: Optional[float] = None, + specific_day_of_week: Optional[str] = None, + specific_month: Optional[float] = None, + specific_quarter: Optional[float] = None, + specific_year: Optional[float] = None, + numeric_value: Optional[float] = None, + subtype: Optional[str] = None, + part_of_day: Optional[str] = None, + relative_hour: Optional[float] = None, + relative_minute: Optional[float] = None, + relative_second: Optional[float] = None, + specific_hour: Optional[float] = None, + specific_minute: Optional[float] = None, + specific_second: Optional[float] = None, + timezone: Optional[str] = None, + ) -> None: + """ + Initialize a RuntimeEntityInterpretation object. + + :param str calendar_type: (optional) The calendar used to represent a + recognized date (for example, `Gregorian`). + :param str datetime_link: (optional) A unique identifier used to associate + a recognized time and date. If the user input contains a date and time that + are mentioned together (for example, `Today at 5`, the same + **datetime_link** value is returned for both the `@sys-date` and + `@sys-time` entities). + :param str festival: (optional) A locale-specific holiday name (such as + `thanksgiving` or `christmas`). This property is included when a + `@sys-date` entity is recognized based on a holiday name in the user input. + :param str granularity: (optional) The precision or duration of a time + range specified by a recognized `@sys-time` or `@sys-date` entity. + :param str range_link: (optional) A unique identifier used to associate + multiple recognized `@sys-date`, `@sys-time`, or `@sys-number` entities + that are recognized as a range of values in the user's input (for example, + `from July 4 until July 14` or `from 20 to 25`). + :param str range_modifier: (optional) The word in the user input that + indicates that a `sys-date` or `sys-time` entity is part of an implied + range where only one date or time is specified (for example, `since` or + `until`). + :param float relative_day: (optional) A recognized mention of a relative + day, represented numerically as an offset from the current date (for + example, `-1` for `yesterday` or `10` for `in ten days`). + :param float relative_month: (optional) A recognized mention of a relative + month, represented numerically as an offset from the current month (for + example, `1` for `next month` or `-3` for `three months ago`). + :param float relative_week: (optional) A recognized mention of a relative + week, represented numerically as an offset from the current week (for + example, `2` for `in two weeks` or `-1` for `last week). + :param float relative_weekend: (optional) A recognized mention of a + relative date range for a weekend, represented numerically as an offset + from the current weekend (for example, `0` for `this weekend` or `-1` for + `last weekend`). 
+ :param float relative_year: (optional) A recognized mention of a relative + year, represented numerically as an offset from the current year (for + example, `1` for `next year` or `-5` for `five years ago`). + :param float specific_day: (optional) A recognized mention of a specific + date, represented numerically as the date within the month (for example, + `30` for `June 30`.). + :param str specific_day_of_week: (optional) A recognized mention of a + specific day of the week as a lowercase string (for example, `monday`). + :param float specific_month: (optional) A recognized mention of a specific + month, represented numerically (for example, `7` for `July`). + :param float specific_quarter: (optional) A recognized mention of a + specific quarter, represented numerically (for example, `3` for `the third + quarter`). + :param float specific_year: (optional) A recognized mention of a specific + year (for example, `2016`). + :param float numeric_value: (optional) A recognized numeric value, + represented as an integer or double. + :param str subtype: (optional) The type of numeric value recognized in the + user input (`integer` or `rational`). + :param str part_of_day: (optional) A recognized term for a time that was + mentioned as a part of the day in the user's input (for example, `morning` + or `afternoon`). + :param float relative_hour: (optional) A recognized mention of a relative + hour, represented numerically as an offset from the current hour (for + example, `3` for `in three hours` or `-1` for `an hour ago`). + :param float relative_minute: (optional) A recognized mention of a relative + time, represented numerically as an offset in minutes from the current time + (for example, `5` for `in five minutes` or `-15` for `fifteen minutes + ago`). + :param float relative_second: (optional) A recognized mention of a relative + time, represented numerically as an offset in seconds from the current time + (for example, `10` for `in ten seconds` or `-30` for `thirty seconds ago`). + :param float specific_hour: (optional) A recognized specific hour mentioned + as part of a time value (for example, `10` for `10:15 AM`.). + :param float specific_minute: (optional) A recognized specific minute + mentioned as part of a time value (for example, `15` for `10:15 AM`.). + :param float specific_second: (optional) A recognized specific second + mentioned as part of a time value (for example, `30` for `10:15:30 AM`.). + :param str timezone: (optional) A recognized time zone mentioned as part of + a time value (for example, `EST`). 
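# Reading the interpretation attached to a system entity (sketch). The payload
# mimics what the service might return for a `@sys-date` mention such as
# "tomorrow"; the exact fields present vary with the input.
interp = RuntimeEntityInterpretation.from_dict({
    'calendar_type': 'Gregorian',
    'granularity': 'day',
    'relative_day': 1,
})
if interp.granularity == RuntimeEntityInterpretation.GranularityEnum.DAY.value:
    print('resolved to a whole day, offset from today:', interp.relative_day)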
+ """ + self.calendar_type = calendar_type + self.datetime_link = datetime_link + self.festival = festival + self.granularity = granularity + self.range_link = range_link + self.range_modifier = range_modifier + self.relative_day = relative_day + self.relative_month = relative_month + self.relative_week = relative_week + self.relative_weekend = relative_weekend + self.relative_year = relative_year + self.specific_day = specific_day + self.specific_day_of_week = specific_day_of_week + self.specific_month = specific_month + self.specific_quarter = specific_quarter + self.specific_year = specific_year + self.numeric_value = numeric_value + self.subtype = subtype + self.part_of_day = part_of_day + self.relative_hour = relative_hour + self.relative_minute = relative_minute + self.relative_second = relative_second + self.specific_hour = specific_hour + self.specific_minute = specific_minute + self.specific_second = specific_second + self.timezone = timezone + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeEntityInterpretation': + """Initialize a RuntimeEntityInterpretation object from a json dictionary.""" + args = {} + if (calendar_type := _dict.get('calendar_type')) is not None: + args['calendar_type'] = calendar_type + if (datetime_link := _dict.get('datetime_link')) is not None: + args['datetime_link'] = datetime_link + if (festival := _dict.get('festival')) is not None: + args['festival'] = festival + if (granularity := _dict.get('granularity')) is not None: + args['granularity'] = granularity + if (range_link := _dict.get('range_link')) is not None: + args['range_link'] = range_link + if (range_modifier := _dict.get('range_modifier')) is not None: + args['range_modifier'] = range_modifier + if (relative_day := _dict.get('relative_day')) is not None: + args['relative_day'] = relative_day + if (relative_month := _dict.get('relative_month')) is not None: + args['relative_month'] = relative_month + if (relative_week := _dict.get('relative_week')) is not None: + args['relative_week'] = relative_week + if (relative_weekend := _dict.get('relative_weekend')) is not None: + args['relative_weekend'] = relative_weekend + if (relative_year := _dict.get('relative_year')) is not None: + args['relative_year'] = relative_year + if (specific_day := _dict.get('specific_day')) is not None: + args['specific_day'] = specific_day + if (specific_day_of_week := + _dict.get('specific_day_of_week')) is not None: + args['specific_day_of_week'] = specific_day_of_week + if (specific_month := _dict.get('specific_month')) is not None: + args['specific_month'] = specific_month + if (specific_quarter := _dict.get('specific_quarter')) is not None: + args['specific_quarter'] = specific_quarter + if (specific_year := _dict.get('specific_year')) is not None: + args['specific_year'] = specific_year + if (numeric_value := _dict.get('numeric_value')) is not None: + args['numeric_value'] = numeric_value + if (subtype := _dict.get('subtype')) is not None: + args['subtype'] = subtype + if (part_of_day := _dict.get('part_of_day')) is not None: + args['part_of_day'] = part_of_day + if (relative_hour := _dict.get('relative_hour')) is not None: + args['relative_hour'] = relative_hour + if (relative_minute := _dict.get('relative_minute')) is not None: + args['relative_minute'] = relative_minute + if (relative_second := _dict.get('relative_second')) is not None: + args['relative_second'] = relative_second + if (specific_hour := _dict.get('specific_hour')) is not None: + args['specific_hour'] = specific_hour + if 
(specific_minute := _dict.get('specific_minute')) is not None: + args['specific_minute'] = specific_minute + if (specific_second := _dict.get('specific_second')) is not None: + args['specific_second'] = specific_second + if (timezone := _dict.get('timezone')) is not None: + args['timezone'] = timezone + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeEntityInterpretation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'calendar_type') and self.calendar_type is not None: + _dict['calendar_type'] = self.calendar_type + if hasattr(self, 'datetime_link') and self.datetime_link is not None: + _dict['datetime_link'] = self.datetime_link + if hasattr(self, 'festival') and self.festival is not None: + _dict['festival'] = self.festival + if hasattr(self, 'granularity') and self.granularity is not None: + _dict['granularity'] = self.granularity + if hasattr(self, 'range_link') and self.range_link is not None: + _dict['range_link'] = self.range_link + if hasattr(self, 'range_modifier') and self.range_modifier is not None: + _dict['range_modifier'] = self.range_modifier + if hasattr(self, 'relative_day') and self.relative_day is not None: + _dict['relative_day'] = self.relative_day + if hasattr(self, 'relative_month') and self.relative_month is not None: + _dict['relative_month'] = self.relative_month + if hasattr(self, 'relative_week') and self.relative_week is not None: + _dict['relative_week'] = self.relative_week + if hasattr(self, + 'relative_weekend') and self.relative_weekend is not None: + _dict['relative_weekend'] = self.relative_weekend + if hasattr(self, 'relative_year') and self.relative_year is not None: + _dict['relative_year'] = self.relative_year + if hasattr(self, 'specific_day') and self.specific_day is not None: + _dict['specific_day'] = self.specific_day + if hasattr(self, 'specific_day_of_week' + ) and self.specific_day_of_week is not None: + _dict['specific_day_of_week'] = self.specific_day_of_week + if hasattr(self, 'specific_month') and self.specific_month is not None: + _dict['specific_month'] = self.specific_month + if hasattr(self, + 'specific_quarter') and self.specific_quarter is not None: + _dict['specific_quarter'] = self.specific_quarter + if hasattr(self, 'specific_year') and self.specific_year is not None: + _dict['specific_year'] = self.specific_year + if hasattr(self, 'numeric_value') and self.numeric_value is not None: + _dict['numeric_value'] = self.numeric_value + if hasattr(self, 'subtype') and self.subtype is not None: + _dict['subtype'] = self.subtype + if hasattr(self, 'part_of_day') and self.part_of_day is not None: + _dict['part_of_day'] = self.part_of_day + if hasattr(self, 'relative_hour') and self.relative_hour is not None: + _dict['relative_hour'] = self.relative_hour + if hasattr(self, + 'relative_minute') and self.relative_minute is not None: + _dict['relative_minute'] = self.relative_minute + if hasattr(self, + 'relative_second') and self.relative_second is not None: + _dict['relative_second'] = self.relative_second + if hasattr(self, 'specific_hour') and self.specific_hour is not None: + _dict['specific_hour'] = self.specific_hour + if hasattr(self, + 'specific_minute') and self.specific_minute is not None: + _dict['specific_minute'] = self.specific_minute + if hasattr(self, + 'specific_second') and self.specific_second is not None: + _dict['specific_second'] = 
self.specific_second + if hasattr(self, 'timezone') and self.timezone is not None: + _dict['timezone'] = self.timezone + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeEntityInterpretation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RuntimeEntityInterpretation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RuntimeEntityInterpretation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class GranularityEnum(str, Enum): + """ + The precision or duration of a time range specified by a recognized `@sys-time` or + `@sys-date` entity. + """ + + DAY = 'day' + FORTNIGHT = 'fortnight' + HOUR = 'hour' + INSTANT = 'instant' + MINUTE = 'minute' + MONTH = 'month' + QUARTER = 'quarter' + SECOND = 'second' + WEEK = 'week' + WEEKEND = 'weekend' + YEAR = 'year' + + +class RuntimeEntityRole: + """ + An object describing the role played by a system entity that is specifies the + beginning or end of a range recognized in the user input. This property is included + only if the new system entities are enabled for the workspace. + + :param str type: (optional) The relationship of the entity to the range. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + ) -> None: + """ + Initialize a RuntimeEntityRole object. + + :param str type: (optional) The relationship of the entity to the range. + """ + self.type = type + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeEntityRole': + """Initialize a RuntimeEntityRole object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + return cls(**args) + + @classmethod def _from_dict(cls, _dict): - """Initialize a DialogRuntimeResponseGeneric object from a json dictionary.""" + """Initialize a RuntimeEntityRole object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeEntityRole object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RuntimeEntityRole') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RuntimeEntityRole') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The relationship of the entity to the range. + """ + + DATE_FROM = 'date_from' + DATE_TO = 'date_to' + NUMBER_FROM = 'number_from' + NUMBER_TO = 'number_to' + TIME_FROM = 'time_from' + TIME_TO = 'time_to' + + +class RuntimeIntent: + """ + An intent identified in the user input. + + :param str intent: The name of the recognized intent. + :param float confidence: (optional) A decimal percentage that represents + confidence in the intent. 
If you are specifying an intent as part of a request, + but you do not have a calculated confidence value, specify `1`. + """ + + def __init__( + self, + intent: str, + *, + confidence: Optional[float] = None, + ) -> None: + """ + Initialize a RuntimeIntent object. + + :param str intent: The name of the recognized intent. + :param float confidence: (optional) A decimal percentage that represents + confidence in the intent. If you are specifying an intent as part of a + request, but you do not have a calculated confidence value, specify `1`. + """ + self.intent = intent + self.confidence = confidence + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeIntent': + """Initialize a RuntimeIntent object from a json dictionary.""" args = {} - if 'response_type' in _dict: - args['response_type'] = _dict.get('response_type') + if (intent := _dict.get('intent')) is not None: + args['intent'] = intent else: raise ValueError( - 'Required property \'response_type\' not present in DialogRuntimeResponseGeneric JSON' + 'Required property \'intent\' not present in RuntimeIntent JSON' + ) + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeIntent object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'intent') and self.intent is not None: + _dict['intent'] = self.intent + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeIntent object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RuntimeIntent') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RuntimeIntent') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class RuntimeResponseGeneric: + """ + RuntimeResponseGeneric. + + """ + + def __init__(self,) -> None: + """ + Initialize a RuntimeResponseGeneric object. + + """ + msg = "Cannot instantiate base class. 
Instead, instantiate one of the defined subclasses: {0}".format( + ", ".join([ + 'RuntimeResponseGenericRuntimeResponseTypeText', + 'RuntimeResponseGenericRuntimeResponseTypePause', + 'RuntimeResponseGenericRuntimeResponseTypeImage', + 'RuntimeResponseGenericRuntimeResponseTypeOption', + 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent', + 'RuntimeResponseGenericRuntimeResponseTypeSuggestion', + 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer', + 'RuntimeResponseGenericRuntimeResponseTypeUserDefined', + 'RuntimeResponseGenericRuntimeResponseTypeVideo', + 'RuntimeResponseGenericRuntimeResponseTypeAudio', + 'RuntimeResponseGenericRuntimeResponseTypeIframe' + ])) + raise Exception(msg) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeResponseGeneric': + """Initialize a RuntimeResponseGeneric object from a json dictionary.""" + disc_class = cls._get_class_by_discriminator(_dict) + if disc_class != cls: + return disc_class.from_dict(_dict) + msg = "Cannot convert dictionary into an instance of base class 'RuntimeResponseGeneric'. The discriminator value should map to a valid subclass: {0}".format( + ", ".join([ + 'RuntimeResponseGenericRuntimeResponseTypeText', + 'RuntimeResponseGenericRuntimeResponseTypePause', + 'RuntimeResponseGenericRuntimeResponseTypeImage', + 'RuntimeResponseGenericRuntimeResponseTypeOption', + 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent', + 'RuntimeResponseGenericRuntimeResponseTypeSuggestion', + 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer', + 'RuntimeResponseGenericRuntimeResponseTypeUserDefined', + 'RuntimeResponseGenericRuntimeResponseTypeVideo', + 'RuntimeResponseGenericRuntimeResponseTypeAudio', + 'RuntimeResponseGenericRuntimeResponseTypeIframe' + ])) + raise Exception(msg) + + @classmethod + def _from_dict(cls, _dict: Dict): + """Initialize a RuntimeResponseGeneric object from a json dictionary.""" + return cls.from_dict(_dict) + + @classmethod + def _get_class_by_discriminator(cls, _dict: Dict) -> object: + mapping = {} + mapping['audio'] = 'RuntimeResponseGenericRuntimeResponseTypeAudio' + mapping[ + 'channel_transfer'] = 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer' + mapping[ + 'connect_to_agent'] = 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent' + mapping['iframe'] = 'RuntimeResponseGenericRuntimeResponseTypeIframe' + mapping['image'] = 'RuntimeResponseGenericRuntimeResponseTypeImage' + mapping['option'] = 'RuntimeResponseGenericRuntimeResponseTypeOption' + mapping[ + 'suggestion'] = 'RuntimeResponseGenericRuntimeResponseTypeSuggestion' + mapping['pause'] = 'RuntimeResponseGenericRuntimeResponseTypePause' + mapping['text'] = 'RuntimeResponseGenericRuntimeResponseTypeText' + mapping[ + 'user_defined'] = 'RuntimeResponseGenericRuntimeResponseTypeUserDefined' + mapping['video'] = 'RuntimeResponseGenericRuntimeResponseTypeVideo' + disc_value = _dict.get('response_type') + if disc_value is None: + raise ValueError( + 'Discriminator property \'response_type\' not found in RuntimeResponseGeneric JSON' + ) + class_name = mapping.get(disc_value, disc_value) + try: + disc_class = getattr(sys.modules[__name__], class_name) + except AttributeError: + disc_class = cls + if isinstance(disc_class, object): + return disc_class + raise TypeError('%s is not a discriminator class' % class_name) + + + class StatusError: + """ + An object describing an error that occurred during processing of an asynchronous + operation. + + :param str message: (optional) The text of the error message.
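# How the RuntimeResponseGeneric discriminator above is intended to be used
# (sketch): from_dict() inspects 'response_type' and delegates to the matching
# subclass defined later in this module. The payload is illustrative and assumes
# the 'text' subclass accepts a plain `text` field, as in the service's
# message responses.
generic = RuntimeResponseGeneric.from_dict({
    'response_type': 'text',
    'text': 'Hello! How can I help you?',
})
# `generic` is an instance of the 'text' subclass; the base class itself
# cannot be instantiated directly.
print(type(generic).__name__)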
+ """ + + def __init__( + self, + *, + message: Optional[str] = None, + ) -> None: + """ + Initialize a StatusError object. + + :param str message: (optional) The text of the error message. + """ + self.message = message + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatusError': + """Initialize a StatusError object from a json dictionary.""" + args = {} + if (message := _dict.get('message')) is not None: + args['message'] = message + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatusError object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'message') and self.message is not None: + _dict['message'] = self.message + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatusError object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatusError') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatusError') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Synonym: + """ + Synonym. + + :param str synonym: The text of the synonym. This string must conform to the + following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + """ + + def __init__( + self, + synonym: str, + *, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: + """ + Initialize a Synonym object. + + :param str synonym: The text of the synonym. This string must conform to + the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. 
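# Round-trip sketch for the Synonym model. The timestamp is illustrative;
# from_dict() parses it with string_to_datetime() and to_dict() serializes it
# back with datetime_to_string().
syn = Synonym.from_dict({
    'synonym': 'cappuccino',
    'created': '2022-03-01T12:00:00.000Z',
})
assert isinstance(syn.created, datetime)
assert syn.to_dict()['synonym'] == 'cappuccino'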
+ """ + self.synonym = synonym + self.created = created + self.updated = updated + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Synonym': + """Initialize a Synonym object from a json dictionary.""" + args = {} + if (synonym := _dict.get('synonym')) is not None: + args['synonym'] = synonym + else: + raise ValueError( + 'Required property \'synonym\' not present in Synonym JSON') + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Synonym object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'synonym') and self.synonym is not None: + _dict['synonym'] = self.synonym + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Synonym object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Synonym') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Synonym') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SynonymCollection: + """ + SynonymCollection. + + :param List[Synonym] synonyms: An array of synonyms. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). + """ + + def __init__( + self, + synonyms: List['Synonym'], + pagination: 'Pagination', + ) -> None: + """ + Initialize a SynonymCollection object. + + :param List[Synonym] synonyms: An array of synonyms. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). 
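# Unpacking a list-synonyms response body into the collection model (sketch);
# both 'synonyms' and 'pagination' are required, and the URL is a placeholder.
collection = SynonymCollection.from_dict({
    'synonyms': [{'synonym': 'espresso'}, {'synonym': 'latte'}],
    'pagination': {'refresh_url': '/v1/workspaces/{id}/entities/drink/values/coffee/synonyms'},
})
assert [s.synonym for s in collection.synonyms] == ['espresso', 'latte']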
+ """ + self.synonyms = synonyms + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SynonymCollection': + """Initialize a SynonymCollection object from a json dictionary.""" + args = {} + if (synonyms := _dict.get('synonyms')) is not None: + args['synonyms'] = [Synonym.from_dict(v) for v in synonyms] + else: + raise ValueError( + 'Required property \'synonyms\' not present in SynonymCollection JSON' + ) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in SynonymCollection JSON' ) - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'time' in _dict: - args['time'] = _dict.get('time') - if 'typing' in _dict: - args['typing'] = _dict.get('typing') - if 'source' in _dict: - args['source'] = _dict.get('source') - if 'title' in _dict: - args['title'] = _dict.get('title') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'preference' in _dict: - args['preference'] = _dict.get('preference') - if 'options' in _dict: - args['options'] = [ - DialogNodeOutputOptionsElement._from_dict(x) - for x in (_dict.get('options')) - ] - if 'message_to_human_agent' in _dict: - args['message_to_human_agent'] = _dict.get('message_to_human_agent') - if 'topic' in _dict: - args['topic'] = _dict.get('topic') - if 'dialog_node' in _dict: - args['dialog_node'] = _dict.get('dialog_node') - if 'suggestions' in _dict: - args['suggestions'] = [ - DialogSuggestion._from_dict(x) - for x in (_dict.get('suggestions')) - ] return cls(**args) + @classmethod + def _from_dict(cls, _dict): + """Initialize a SynonymCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'synonyms') and self.synonyms is not None: + synonyms_list = [] + for v in self.synonyms: + if isinstance(v, dict): + synonyms_list.append(v) + else: + synonyms_list.append(v.to_dict()) + _dict['synonyms'] = synonyms_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SynonymCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SynonymCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SynonymCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Value: + """ + Value. + + :param str value: The text of the entity value. This string must conform to the + following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param dict metadata: (optional) Any metadata related to the entity value. + :param str type: Specifies the type of entity value. + :param List[str] synonyms: (optional) An array of synonyms for the entity value. 
+ A value can specify either synonyms or patterns (depending on the value type), + but not both. A synonym must conform to the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param List[str] patterns: (optional) An array of patterns for the entity value. + A value can specify either synonyms or patterns (depending on the value type), + but not both. A pattern is a regular expression; for more information about how + to specify a pattern, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-entities#entities-create-dictionary-based). + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + """ + + def __init__( + self, + value: str, + type: str, + *, + metadata: Optional[dict] = None, + synonyms: Optional[List[str]] = None, + patterns: Optional[List[str]] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: + """ + Initialize a Value object. + + :param str value: The text of the entity value. This string must conform to + the following restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param str type: Specifies the type of entity value. + :param dict metadata: (optional) Any metadata related to the entity value. + :param List[str] synonyms: (optional) An array of synonyms for the entity + value. A value can specify either synonyms or patterns (depending on the + value type), but not both. A synonym must conform to the following + restrictions: + - It cannot contain carriage return, newline, or tab characters. + - It cannot consist of only whitespace characters. + :param List[str] patterns: (optional) An array of patterns for the entity + value. A value can specify either synonyms or patterns (depending on the + value type), but not both. A pattern is a regular expression; for more + information about how to specify a pattern, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-entities#entities-create-dictionary-based).
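# Two illustrative ways to define an entity value, matching the TypeEnum values
# declared further down: dictionary-style values carry synonyms, pattern-style
# values carry regular expressions; a value uses one or the other, never both.
dictionary_value = Value(
    value='coffee maker',
    type='synonyms',
    synonyms=['coffee machine', 'espresso maker'],
)
pattern_value = Value(
    value='order_number',
    type='patterns',
    patterns=[r'[A-Z]{2}\d{6}'],  # made-up order-number pattern
)
assert dictionary_value.to_dict()['type'] == Value.TypeEnum.SYNONYMS.value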
+ """ + self.value = value + self.metadata = metadata + self.type = type + self.synonyms = synonyms + self.patterns = patterns + self.created = created + self.updated = updated + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Value': + """Initialize a Value object from a json dictionary.""" + args = {} + if (value := _dict.get('value')) is not None: + args['value'] = value + else: + raise ValueError( + 'Required property \'value\' not present in Value JSON') + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = metadata + if (type := _dict.get('type')) is not None: + args['type'] = type + else: + raise ValueError( + 'Required property \'type\' not present in Value JSON') + if (synonyms := _dict.get('synonyms')) is not None: + args['synonyms'] = synonyms + if (patterns := _dict.get('patterns')) is not None: + args['patterns'] = patterns + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Value object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'response_type') and self.response_type is not None: - _dict['response_type'] = self.response_type - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'time') and self.time is not None: - _dict['time'] = self.time - if hasattr(self, 'typing') and self.typing is not None: - _dict['typing'] = self.typing - if hasattr(self, 'source') and self.source is not None: - _dict['source'] = self.source - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'preference') and self.preference is not None: - _dict['preference'] = self.preference - if hasattr(self, 'options') and self.options is not None: - _dict['options'] = [x._to_dict() for x in self.options] - if hasattr(self, 'message_to_human_agent' - ) and self.message_to_human_agent is not None: - _dict['message_to_human_agent'] = self.message_to_human_agent - if hasattr(self, 'topic') and self.topic is not None: - _dict['topic'] = self.topic - if hasattr(self, 'dialog_node') and self.dialog_node is not None: - _dict['dialog_node'] = self.dialog_node - if hasattr(self, 'suggestions') and self.suggestions is not None: - _dict['suggestions'] = [x._to_dict() for x in self.suggestions] + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value + if hasattr(self, 'metadata') and self.metadata is not None: + _dict['metadata'] = self.metadata + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'synonyms') and self.synonyms is not None: + _dict['synonyms'] = self.synonyms + if hasattr(self, 'patterns') and self.patterns is not None: + _dict['patterns'] = self.patterns + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) return _dict - def __str__(self): - """Return a `str` version of this DialogRuntimeResponseGeneric object.""" - return 
json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Value object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Value') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Value') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class TypeEnum(str, Enum): + """ + Specifies the type of entity value. + """ + + SYNONYMS = 'synonyms' + PATTERNS = 'patterns' -class DialogSuggestion(object): + +class ValueCollection: """ - DialogSuggestion. + ValueCollection. - :attr str label: The user-facing label for the disambiguation option. This label is - taken from the **user_label** property of the corresponding dialog node. - :attr DialogSuggestionValue value: An object defining the message input, intents, and - entities to be sent to the Watson Assistant service if the user selects the - corresponding disambiguation option. - :attr dict output: (optional) The dialog output that will be returned from the Watson - Assistant service if the user selects the corresponding option. - :attr str dialog_node: (optional) The ID of the dialog node that the **label** - property is taken from. The **label** property is populated using the value of the - dialog node's **user_label** property. + :param List[Value] values: An array of entity values. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). """ - def __init__(self, label, value, output=None, dialog_node=None): + def __init__( + self, + values: List['Value'], + pagination: 'Pagination', + ) -> None: """ - Initialize a DialogSuggestion object. + Initialize a ValueCollection object. - :param str label: The user-facing label for the disambiguation option. This label - is taken from the **user_label** property of the corresponding dialog node. - :param DialogSuggestionValue value: An object defining the message input, intents, - and entities to be sent to the Watson Assistant service if the user selects the - corresponding disambiguation option. - :param dict output: (optional) The dialog output that will be returned from the - Watson Assistant service if the user selects the corresponding option. - :param str dialog_node: (optional) The ID of the dialog node that the **label** - property is taken from. The **label** property is populated using the value of the - dialog node's **user_label** property. + :param List[Value] values: An array of entity values. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). 
""" - self.label = label - self.value = value - self.output = output - self.dialog_node = dialog_node + self.values = values + self.pagination = pagination @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogSuggestion object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'ValueCollection': + """Initialize a ValueCollection object from a json dictionary.""" args = {} - if 'label' in _dict: - args['label'] = _dict.get('label') + if (values := _dict.get('values')) is not None: + args['values'] = [Value.from_dict(v) for v in values] else: raise ValueError( - 'Required property \'label\' not present in DialogSuggestion JSON' + 'Required property \'values\' not present in ValueCollection JSON' ) - if 'value' in _dict: - args['value'] = DialogSuggestionValue._from_dict(_dict.get('value')) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) else: raise ValueError( - 'Required property \'value\' not present in DialogSuggestion JSON' + 'Required property \'pagination\' not present in ValueCollection JSON' ) - if 'output' in _dict: - args['output'] = _dict.get('output') - if 'dialog_node' in _dict: - args['dialog_node'] = _dict.get('dialog_node') return cls(**args) + @classmethod + def _from_dict(cls, _dict): + """Initialize a ValueCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'values') and self.values is not None: + values_list = [] + for v in self.values: + if isinstance(v, dict): + values_list.append(v) + else: + values_list.append(v.to_dict()) + _dict['values'] = values_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ValueCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ValueCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ValueCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Webhook: + """ + A webhook that can be used by dialog nodes to make programmatic calls to an external + function. + **Note:** Currently, only a single webhook named `main_webhook` is supported. + + :param str url: The URL for the external service or application to which you + want to send HTTP POST requests. + :param str name: The name of the webhook. Currently, `main_webhook` is the only + supported value. + :param List[WebhookHeader] headers_: (optional) An optional array of HTTP + headers to pass with the HTTP request. + """ + + def __init__( + self, + url: str, + name: str, + *, + headers_: Optional[List['WebhookHeader']] = None, + ) -> None: + """ + Initialize a Webhook object. + + :param str url: The URL for the external service or application to which + you want to send HTTP POST requests. + :param str name: The name of the webhook. Currently, `main_webhook` is the + only supported value. 
+ :param List[WebhookHeader] headers_: (optional) An optional array of HTTP + headers to pass with the HTTP request. + """ + self.url = url + self.name = name + self.headers_ = headers_ + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Webhook': + """Initialize a Webhook object from a json dictionary.""" + args = {} + if (url := _dict.get('url')) is not None: + args['url'] = url + else: + raise ValueError( + 'Required property \'url\' not present in Webhook JSON') + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in Webhook JSON') + if (headers_ := _dict.get('headers')) is not None: + args['headers_'] = [WebhookHeader.from_dict(v) for v in headers_] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Webhook object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'label') and self.label is not None: - _dict['label'] = self.label - if hasattr(self, 'value') and self.value is not None: - _dict['value'] = self.value._to_dict() - if hasattr(self, 'output') and self.output is not None: - _dict['output'] = self.output - if hasattr(self, 'dialog_node') and self.dialog_node is not None: - _dict['dialog_node'] = self.dialog_node + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'headers_') and self.headers_ is not None: + headers_list = [] + for v in self.headers_: + if isinstance(v, dict): + headers_list.append(v) + else: + headers_list.append(v.to_dict()) + _dict['headers'] = headers_list return _dict - def __str__(self): - """Return a `str` version of this DialogSuggestion object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this Webhook object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Webhook') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Webhook') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogSuggestionValue(object): +class WebhookHeader: """ - An object defining the message input, intents, and entities to be sent to the Watson - Assistant service if the user selects the corresponding disambiguation option. + A key/value pair defining an HTTP header and a value. - :attr MessageInput input: (optional) An input object that includes the input text. - :attr list[RuntimeIntent] intents: (optional) An array of intents to be sent along - with the user input. - :attr list[RuntimeEntity] entities: (optional) An array of entities to be sent along - with the user input. + :param str name: The name of an HTTP header (for example, `Authorization`). + :param str value: The value of an HTTP header. """ - def __init__(self, input=None, intents=None, entities=None): + def __init__( + self, + name: str, + value: str, + ) -> None: """ - Initialize a DialogSuggestionValue object. + Initialize a WebhookHeader object. 
- :param MessageInput input: (optional) An input object that includes the input - text. - :param list[RuntimeIntent] intents: (optional) An array of intents to be sent - along with the user input. - :param list[RuntimeEntity] entities: (optional) An array of entities to be sent - along with the user input. + :param str name: The name of an HTTP header (for example, `Authorization`). + :param str value: The value of an HTTP header. """ - self.input = input - self.intents = intents - self.entities = entities + self.name = name + self.value = value @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogSuggestionValue object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'WebhookHeader': + """Initialize a WebhookHeader object from a json dictionary.""" args = {} - if 'input' in _dict: - args['input'] = MessageInput._from_dict(_dict.get('input')) - if 'intents' in _dict: - args['intents'] = [ - RuntimeIntent._from_dict(x) for x in (_dict.get('intents')) - ] - if 'entities' in _dict: - args['entities'] = [ - RuntimeEntity._from_dict(x) for x in (_dict.get('entities')) - ] + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in WebhookHeader JSON') + if (value := _dict.get('value')) is not None: + args['value'] = value + else: + raise ValueError( + 'Required property \'value\' not present in WebhookHeader JSON') return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a WebhookHeader object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'input') and self.input is not None: - _dict['input'] = self.input._to_dict() - if hasattr(self, 'intents') and self.intents is not None: - _dict['intents'] = [x._to_dict() for x in self.intents] - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value return _dict - def __str__(self): - """Return a `str` version of this DialogSuggestionValue object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this WebhookHeader object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'WebhookHeader') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'WebhookHeader') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Entity(object): +class Workspace: + """ + Workspace. + + :param str name: The name of the workspace. This string cannot contain carriage + return, newline, or tab characters. + :param str description: (optional) The description of the workspace. This string + cannot contain carriage return, newline, or tab characters. + :param str language: The language of the workspace. + :param str workspace_id: (optional) The workspace ID of the workspace. 
+ :param List[DialogNode] dialog_nodes: (optional) An array of objects describing + the dialog nodes in the workspace. + :param List[Counterexample] counterexamples: (optional) An array of objects + defining input examples that have been marked as irrelevant input. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + :param dict metadata: (optional) Any metadata related to the workspace. + :param bool learning_opt_out: Whether training data from the workspace + (including artifacts such as intents and entities) can be used by IBM for + general service improvements. `true` indicates that workspace training data is + not to be used. + :param WorkspaceSystemSettings system_settings: (optional) Global settings for + the workspace. + :param str status: (optional) The current status of the workspace: + - **Available**: The workspace is available and ready to process messages. + - **Failed**: An asynchronous operation has failed. See the **status_errors** + property for more information about the cause of the failure. + - **Non Existent**: The workspace does not exist. + - **Processing**: An asynchronous operation has not yet completed. + - **Training**: The workspace is training based on new data such as intents or + examples. + :param List[StatusError] status_errors: (optional) An array of messages about + errors that caused an asynchronous operation to fail. + :param List[Webhook] webhooks: (optional) + :param List[Intent] intents: (optional) An array of intents. + :param List[Entity] entities: (optional) An array of objects describing the + entities for the workspace. + :param WorkspaceCounts counts: (optional) An object containing properties that + indicate how many intents, entities, and dialog nodes are defined in the + workspace. This property is included only in responses from the **Export + workspace asynchronously** method, and only when the **verbose** query parameter + is set to `true`. """ - Entity. - :attr str entity: The name of the entity. This string must conform to the following - restrictions: - - It can contain only Unicode alphanumeric, underscore, and hyphen characters. - - It must be no longer than 64 characters. - If you specify an entity name beginning with the reserved prefix `sys-`, it must be - the name of a system entity that you want to enable. (Any entity content specified - with the request is ignored.). - :attr str description: (optional) The description of the entity. This string cannot - contain carriage return, newline, or tab characters, and it must be no longer than 128 - characters. - :attr dict metadata: (optional) Any metadata related to the entity. - :attr bool fuzzy_match: (optional) Whether to use fuzzy matching for the entity. - :attr datetime created: (optional) The timestamp for creation of the object. - :attr datetime updated: (optional) The timestamp for the most recent update to the - object. - :attr list[Value] values: (optional) An array of objects describing the entity values. 
- """ - - def __init__(self, - entity, - description=None, - metadata=None, - fuzzy_match=None, - created=None, - updated=None, - values=None): + def __init__( + self, + name: str, + language: str, + learning_opt_out: bool, + *, + description: Optional[str] = None, + workspace_id: Optional[str] = None, + dialog_nodes: Optional[List['DialogNode']] = None, + counterexamples: Optional[List['Counterexample']] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + metadata: Optional[dict] = None, + system_settings: Optional['WorkspaceSystemSettings'] = None, + status: Optional[str] = None, + status_errors: Optional[List['StatusError']] = None, + webhooks: Optional[List['Webhook']] = None, + intents: Optional[List['Intent']] = None, + entities: Optional[List['Entity']] = None, + counts: Optional['WorkspaceCounts'] = None, + ) -> None: """ - Initialize a Entity object. + Initialize a Workspace object. - :param str entity: The name of the entity. This string must conform to the - following restrictions: - - It can contain only Unicode alphanumeric, underscore, and hyphen characters. - - It must be no longer than 64 characters. - If you specify an entity name beginning with the reserved prefix `sys-`, it must - be the name of a system entity that you want to enable. (Any entity content - specified with the request is ignored.). - :param str description: (optional) The description of the entity. This string - cannot contain carriage return, newline, or tab characters, and it must be no - longer than 128 characters. - :param dict metadata: (optional) Any metadata related to the entity. - :param bool fuzzy_match: (optional) Whether to use fuzzy matching for the entity. - :param datetime created: (optional) The timestamp for creation of the object. - :param datetime updated: (optional) The timestamp for the most recent update to - the object. - :param list[Value] values: (optional) An array of objects describing the entity - values. + :param str name: The name of the workspace. This string cannot contain + carriage return, newline, or tab characters. + :param str language: The language of the workspace. + :param bool learning_opt_out: Whether training data from the workspace + (including artifacts such as intents and entities) can be used by IBM for + general service improvements. `true` indicates that workspace training data + is not to be used. + :param str description: (optional) The description of the workspace. This + string cannot contain carriage return, newline, or tab characters. + :param List[DialogNode] dialog_nodes: (optional) An array of objects + describing the dialog nodes in the workspace. + :param List[Counterexample] counterexamples: (optional) An array of objects + defining input examples that have been marked as irrelevant input. + :param dict metadata: (optional) Any metadata related to the workspace. + :param WorkspaceSystemSettings system_settings: (optional) Global settings + for the workspace. + :param List[Webhook] webhooks: (optional) + :param List[Intent] intents: (optional) An array of intents. + :param List[Entity] entities: (optional) An array of objects describing the + entities for the workspace. 
""" - self.entity = entity + self.name = name self.description = description - self.metadata = metadata - self.fuzzy_match = fuzzy_match + self.language = language + self.workspace_id = workspace_id + self.dialog_nodes = dialog_nodes + self.counterexamples = counterexamples self.created = created self.updated = updated - self.values = values + self.metadata = metadata + self.learning_opt_out = learning_opt_out + self.system_settings = system_settings + self.status = status + self.status_errors = status_errors + self.webhooks = webhooks + self.intents = intents + self.entities = entities + self.counts = counts @classmethod - def _from_dict(cls, _dict): - """Initialize a Entity object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'Workspace': + """Initialize a Workspace object from a json dictionary.""" args = {} - if 'entity' in _dict: - args['entity'] = _dict.get('entity') + if (name := _dict.get('name')) is not None: + args['name'] = name else: raise ValueError( - 'Required property \'entity\' not present in Entity JSON') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'metadata' in _dict: - args['metadata'] = _dict.get('metadata') - if 'fuzzy_match' in _dict: - args['fuzzy_match'] = _dict.get('fuzzy_match') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) - if 'values' in _dict: - args['values'] = [ - Value._from_dict(x) for x in (_dict.get('values')) + 'Required property \'name\' not present in Workspace JSON') + if (description := _dict.get('description')) is not None: + args['description'] = description + if (language := _dict.get('language')) is not None: + args['language'] = language + else: + raise ValueError( + 'Required property \'language\' not present in Workspace JSON') + if (workspace_id := _dict.get('workspace_id')) is not None: + args['workspace_id'] = workspace_id + if (dialog_nodes := _dict.get('dialog_nodes')) is not None: + args['dialog_nodes'] = [ + DialogNode.from_dict(v) for v in dialog_nodes + ] + if (counterexamples := _dict.get('counterexamples')) is not None: + args['counterexamples'] = [ + Counterexample.from_dict(v) for v in counterexamples + ] + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = metadata + if (learning_opt_out := _dict.get('learning_opt_out')) is not None: + args['learning_opt_out'] = learning_opt_out + else: + raise ValueError( + 'Required property \'learning_opt_out\' not present in Workspace JSON' + ) + if (system_settings := _dict.get('system_settings')) is not None: + args['system_settings'] = WorkspaceSystemSettings.from_dict( + system_settings) + if (status := _dict.get('status')) is not None: + args['status'] = status + if (status_errors := _dict.get('status_errors')) is not None: + args['status_errors'] = [ + StatusError.from_dict(v) for v in status_errors ] + if (webhooks := _dict.get('webhooks')) is not None: + args['webhooks'] = [Webhook.from_dict(v) for v in webhooks] + if (intents := _dict.get('intents')) is not None: + args['intents'] = [Intent.from_dict(v) for v in intents] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [Entity.from_dict(v) for v in entities] + if (counts := _dict.get('counts')) is not None: + 
args['counts'] = WorkspaceCounts.from_dict(counts) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Workspace object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'entity') and self.entity is not None: - _dict['entity'] = self.entity + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name if hasattr(self, 'description') and self.description is not None: _dict['description'] = self.description + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'workspace_id') and getattr( + self, 'workspace_id') is not None: + _dict['workspace_id'] = getattr(self, 'workspace_id') + if hasattr(self, 'dialog_nodes') and self.dialog_nodes is not None: + dialog_nodes_list = [] + for v in self.dialog_nodes: + if isinstance(v, dict): + dialog_nodes_list.append(v) + else: + dialog_nodes_list.append(v.to_dict()) + _dict['dialog_nodes'] = dialog_nodes_list + if hasattr(self, + 'counterexamples') and self.counterexamples is not None: + counterexamples_list = [] + for v in self.counterexamples: + if isinstance(v, dict): + counterexamples_list.append(v) + else: + counterexamples_list.append(v.to_dict()) + _dict['counterexamples'] = counterexamples_list + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) if hasattr(self, 'metadata') and self.metadata is not None: _dict['metadata'] = self.metadata - if hasattr(self, 'fuzzy_match') and self.fuzzy_match is not None: - _dict['fuzzy_match'] = self.fuzzy_match - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) - if hasattr(self, 'values') and self.values is not None: - _dict['values'] = [x._to_dict() for x in self.values] + if hasattr(self, + 'learning_opt_out') and self.learning_opt_out is not None: + _dict['learning_opt_out'] = self.learning_opt_out + if hasattr(self, + 'system_settings') and self.system_settings is not None: + if isinstance(self.system_settings, dict): + _dict['system_settings'] = self.system_settings + else: + _dict['system_settings'] = self.system_settings.to_dict() + if hasattr(self, 'status') and getattr(self, 'status') is not None: + _dict['status'] = getattr(self, 'status') + if hasattr(self, 'status_errors') and getattr( + self, 'status_errors') is not None: + status_errors_list = [] + for v in getattr(self, 'status_errors'): + if isinstance(v, dict): + status_errors_list.append(v) + else: + status_errors_list.append(v.to_dict()) + _dict['status_errors'] = status_errors_list + if hasattr(self, 'webhooks') and self.webhooks is not None: + webhooks_list = [] + for v in self.webhooks: + if isinstance(v, dict): + webhooks_list.append(v) + else: + webhooks_list.append(v.to_dict()) + _dict['webhooks'] = webhooks_list + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and 
self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'counts') and getattr(self, 'counts') is not None: + if isinstance(getattr(self, 'counts'), dict): + _dict['counts'] = getattr(self, 'counts') + else: + _dict['counts'] = getattr(self, 'counts').to_dict() return _dict - def __str__(self): - """Return a `str` version of this Entity object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Workspace object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Workspace') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Workspace') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class StatusEnum(str, Enum): + """ + The current status of the workspace: + - **Available**: The workspace is available and ready to process messages. + - **Failed**: An asynchronous operation has failed. See the **status_errors** + property for more information about the cause of the failure. + - **Non Existent**: The workspace does not exist. + - **Processing**: An asynchronous operation has not yet completed. + - **Training**: The workspace is training based on new data such as intents or + examples. + """ + + AVAILABLE = 'Available' + FAILED = 'Failed' + NON_EXISTENT = 'Non Existent' + PROCESSING = 'Processing' + TRAINING = 'Training' + UNAVAILABLE = 'Unavailable' -class EntityCollection(object): + +class WorkspaceCollection: """ - An array of objects describing the entities for the workspace. + WorkspaceCollection. - :attr list[Entity] entities: An array of objects describing the entities defined for - the workspace. - :attr Pagination pagination: The pagination data for the returned objects. + :param List[Workspace] workspaces: An array of objects describing the workspaces + associated with the service instance. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). """ - def __init__(self, entities, pagination): + def __init__( + self, + workspaces: List['Workspace'], + pagination: 'Pagination', + ) -> None: """ - Initialize a EntityCollection object. + Initialize a WorkspaceCollection object. - :param list[Entity] entities: An array of objects describing the entities defined - for the workspace. + :param List[Workspace] workspaces: An array of objects describing the + workspaces associated with the service instance. :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). 
""" - self.entities = entities + self.workspaces = workspaces self.pagination = pagination @classmethod - def _from_dict(cls, _dict): - """Initialize a EntityCollection object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'WorkspaceCollection': + """Initialize a WorkspaceCollection object from a json dictionary.""" args = {} - if 'entities' in _dict: - args['entities'] = [ - Entity._from_dict(x) for x in (_dict.get('entities')) - ] + if (workspaces := _dict.get('workspaces')) is not None: + args['workspaces'] = [Workspace.from_dict(v) for v in workspaces] else: raise ValueError( - 'Required property \'entities\' not present in EntityCollection JSON' + 'Required property \'workspaces\' not present in WorkspaceCollection JSON' ) - if 'pagination' in _dict: - args['pagination'] = Pagination._from_dict(_dict.get('pagination')) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) else: raise ValueError( - 'Required property \'pagination\' not present in EntityCollection JSON' + 'Required property \'pagination\' not present in WorkspaceCollection JSON' ) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a WorkspaceCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] + if hasattr(self, 'workspaces') and self.workspaces is not None: + workspaces_list = [] + for v in self.workspaces: + if isinstance(v, dict): + workspaces_list.append(v) + else: + workspaces_list.append(v.to_dict()) + _dict['workspaces'] = workspaces_list if hasattr(self, 'pagination') and self.pagination is not None: - _dict['pagination'] = self.pagination._to_dict() + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() return _dict - def __str__(self): - """Return a `str` version of this EntityCollection object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this WorkspaceCollection object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'WorkspaceCollection') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'WorkspaceCollection') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class EntityMention(object): +class WorkspaceCounts: """ - An object describing a contextual entity mention. - - :attr str text: The text of the user input example. - :attr str intent: The name of the intent. - :attr list[int] location: An array of zero-based character offsets that indicate where - the entity mentions begin and end in the input text. + An object containing properties that indicate how many intents, entities, and dialog + nodes are defined in the workspace. This property is included only in responses from + the **Export workspace asynchronously** method, and only when the **verbose** query + parameter is set to `true`. 
+ + :param int intent: (optional) The number of intents defined in the workspace. + :param int entity: (optional) The number of entities defined in the workspace. + :param int node: (optional) The number of nodes defined in the workspace. """ - def __init__(self, text, intent, location): + def __init__( + self, + *, + intent: Optional[int] = None, + entity: Optional[int] = None, + node: Optional[int] = None, + ) -> None: """ - Initialize a EntityMention object. + Initialize a WorkspaceCounts object. - :param str text: The text of the user input example. - :param str intent: The name of the intent. - :param list[int] location: An array of zero-based character offsets that indicate - where the entity mentions begin and end in the input text. + :param int intent: (optional) The number of intents defined in the + workspace. + :param int entity: (optional) The number of entities defined in the + workspace. + :param int node: (optional) The number of nodes defined in the workspace. """ - self.text = text self.intent = intent - self.location = location + self.entity = entity + self.node = node @classmethod - def _from_dict(cls, _dict): - """Initialize a EntityMention object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'WorkspaceCounts': + """Initialize a WorkspaceCounts object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - else: - raise ValueError( - 'Required property \'text\' not present in EntityMention JSON') - if 'intent' in _dict: - args['intent'] = _dict.get('intent') - else: - raise ValueError( - 'Required property \'intent\' not present in EntityMention JSON' - ) - if 'location' in _dict: - args['location'] = _dict.get('location') - else: - raise ValueError( - 'Required property \'location\' not present in EntityMention JSON' - ) + if (intent := _dict.get('intent')) is not None: + args['intent'] = intent + if (entity := _dict.get('entity')) is not None: + args['entity'] = entity + if (node := _dict.get('node')) is not None: + args['node'] = node return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a WorkspaceCounts object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text if hasattr(self, 'intent') and self.intent is not None: _dict['intent'] = self.intent - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location + if hasattr(self, 'entity') and self.entity is not None: + _dict['entity'] = self.entity + if hasattr(self, 'node') and self.node is not None: + _dict['node'] = self.node return _dict - def __str__(self): - """Return a `str` version of this EntityMention object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this WorkspaceCounts object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'WorkspaceCounts') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'WorkspaceCounts') -> bool: """Return `true` when self and other are not equal, false otherwise.""" 
return not self == other -class EntityMentionCollection(object): +class WorkspaceSystemSettings: """ - EntityMentionCollection. + Global settings for the workspace. - :attr list[EntityMention] examples: An array of objects describing the entity mentions - defined for an entity. - :attr Pagination pagination: The pagination data for the returned objects. + :param WorkspaceSystemSettingsTooling tooling: (optional) Workspace settings + related to the Watson Assistant user interface. + :param WorkspaceSystemSettingsDisambiguation disambiguation: (optional) + Workspace settings related to the disambiguation feature. + :param dict human_agent_assist: (optional) For internal use only. + :param bool spelling_suggestions: (optional) Whether spelling correction is + enabled for the workspace. + :param bool spelling_auto_correct: (optional) Whether autocorrection is enabled + for the workspace. If spelling correction is enabled and this property is + `false`, any suggested corrections are returned in the **suggested_text** + property of the message response. If this property is `true`, any corrections + are automatically applied to the user input, and the original text is returned + in the **original_text** property of the message response. + :param WorkspaceSystemSettingsSystemEntities system_entities: (optional) + Workspace settings related to the behavior of system entities. + :param WorkspaceSystemSettingsOffTopic off_topic: (optional) Workspace settings + related to detection of irrelevant input. + :param WorkspaceSystemSettingsNlp nlp: (optional) Workspace settings related to + the version of the training algorithms currently used by the skill. + + This type supports additional properties of type object. For internal use only. """ - def __init__(self, examples, pagination): + # The set of defined properties for the class + _properties = frozenset([ + 'tooling', 'disambiguation', 'human_agent_assist', + 'spelling_suggestions', 'spelling_auto_correct', 'system_entities', + 'off_topic', 'nlp' + ]) + + def __init__( + self, + *, + tooling: Optional['WorkspaceSystemSettingsTooling'] = None, + disambiguation: Optional[ + 'WorkspaceSystemSettingsDisambiguation'] = None, + human_agent_assist: Optional[dict] = None, + spelling_suggestions: Optional[bool] = None, + spelling_auto_correct: Optional[bool] = None, + system_entities: Optional[ + 'WorkspaceSystemSettingsSystemEntities'] = None, + off_topic: Optional['WorkspaceSystemSettingsOffTopic'] = None, + nlp: Optional['WorkspaceSystemSettingsNlp'] = None, + **kwargs: Optional[object], + ) -> None: """ - Initialize a EntityMentionCollection object. + Initialize a WorkspaceSystemSettings object. - :param list[EntityMention] examples: An array of objects describing the entity - mentions defined for an entity. - :param Pagination pagination: The pagination data for the returned objects. + :param WorkspaceSystemSettingsTooling tooling: (optional) Workspace + settings related to the Watson Assistant user interface. + :param WorkspaceSystemSettingsDisambiguation disambiguation: (optional) + Workspace settings related to the disambiguation feature. + :param dict human_agent_assist: (optional) For internal use only. + :param bool spelling_suggestions: (optional) Whether spelling correction is + enabled for the workspace. + :param bool spelling_auto_correct: (optional) Whether autocorrection is + enabled for the workspace. 
If spelling correction is enabled and this + property is `false`, any suggested corrections are returned in the + **suggested_text** property of the message response. If this property is + `true`, any corrections are automatically applied to the user input, and + the original text is returned in the **original_text** property of the + message response. + :param WorkspaceSystemSettingsSystemEntities system_entities: (optional) + Workspace settings related to the behavior of system entities. + :param WorkspaceSystemSettingsOffTopic off_topic: (optional) Workspace + settings related to detection of irrelevant input. + :param WorkspaceSystemSettingsNlp nlp: (optional) Workspace settings + related to the version of the training algorithms currently used by the + skill. + :param object **kwargs: (optional) For internal use only. """ - self.examples = examples - self.pagination = pagination + self.tooling = tooling + self.disambiguation = disambiguation + self.human_agent_assist = human_agent_assist + self.spelling_suggestions = spelling_suggestions + self.spelling_auto_correct = spelling_auto_correct + self.system_entities = system_entities + self.off_topic = off_topic + self.nlp = nlp + for k, v in kwargs.items(): + if k not in WorkspaceSystemSettings._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. + format(k)) @classmethod - def _from_dict(cls, _dict): - """Initialize a EntityMentionCollection object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'WorkspaceSystemSettings': + """Initialize a WorkspaceSystemSettings object from a json dictionary.""" args = {} - if 'examples' in _dict: - args['examples'] = [ - EntityMention._from_dict(x) for x in (_dict.get('examples')) - ] - else: - raise ValueError( - 'Required property \'examples\' not present in EntityMentionCollection JSON' - ) - if 'pagination' in _dict: - args['pagination'] = Pagination._from_dict(_dict.get('pagination')) - else: - raise ValueError( - 'Required property \'pagination\' not present in EntityMentionCollection JSON' - ) + if (tooling := _dict.get('tooling')) is not None: + args['tooling'] = WorkspaceSystemSettingsTooling.from_dict(tooling) + if (disambiguation := _dict.get('disambiguation')) is not None: + args[ + 'disambiguation'] = WorkspaceSystemSettingsDisambiguation.from_dict( + disambiguation) + if (human_agent_assist := _dict.get('human_agent_assist')) is not None: + args['human_agent_assist'] = human_agent_assist + if (spelling_suggestions := + _dict.get('spelling_suggestions')) is not None: + args['spelling_suggestions'] = spelling_suggestions + if (spelling_auto_correct := + _dict.get('spelling_auto_correct')) is not None: + args['spelling_auto_correct'] = spelling_auto_correct + if (system_entities := _dict.get('system_entities')) is not None: + args[ + 'system_entities'] = WorkspaceSystemSettingsSystemEntities.from_dict( + system_entities) + if (off_topic := _dict.get('off_topic')) is not None: + args['off_topic'] = WorkspaceSystemSettingsOffTopic.from_dict( + off_topic) + if (nlp := _dict.get('nlp')) is not None: + args['nlp'] = WorkspaceSystemSettingsNlp.from_dict(nlp) + for k, v in _dict.items(): + if k not in cls._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + args[k] = v return cls(**args) + 
@classmethod + def _from_dict(cls, _dict): + """Initialize a WorkspaceSystemSettings object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'tooling') and self.tooling is not None: + if isinstance(self.tooling, dict): + _dict['tooling'] = self.tooling + else: + _dict['tooling'] = self.tooling.to_dict() + if hasattr(self, 'disambiguation') and self.disambiguation is not None: + if isinstance(self.disambiguation, dict): + _dict['disambiguation'] = self.disambiguation + else: + _dict['disambiguation'] = self.disambiguation.to_dict() + if hasattr( + self, + 'human_agent_assist') and self.human_agent_assist is not None: + _dict['human_agent_assist'] = self.human_agent_assist + if hasattr(self, 'spelling_suggestions' + ) and self.spelling_suggestions is not None: + _dict['spelling_suggestions'] = self.spelling_suggestions + if hasattr(self, 'spelling_auto_correct' + ) and self.spelling_auto_correct is not None: + _dict['spelling_auto_correct'] = self.spelling_auto_correct + if hasattr(self, + 'system_entities') and self.system_entities is not None: + if isinstance(self.system_entities, dict): + _dict['system_entities'] = self.system_entities + else: + _dict['system_entities'] = self.system_entities.to_dict() + if hasattr(self, 'off_topic') and self.off_topic is not None: + if isinstance(self.off_topic, dict): + _dict['off_topic'] = self.off_topic + else: + _dict['off_topic'] = self.off_topic.to_dict() + if hasattr(self, 'nlp') and self.nlp is not None: + if isinstance(self.nlp, dict): + _dict['nlp'] = self.nlp + else: + _dict['nlp'] = self.nlp.to_dict() + for k in [ + _k for _k in vars(self).keys() + if _k not in WorkspaceSystemSettings._properties + ]: + _dict[k] = getattr(self, k) + return _dict + def _to_dict(self): """Return a json dictionary representing this model.""" + return self.to_dict() + + def get_properties(self) -> Dict: + """Return the additional properties from this instance of WorkspaceSystemSettings in the form of a dict.""" _dict = {} - if hasattr(self, 'examples') and self.examples is not None: - _dict['examples'] = [x._to_dict() for x in self.examples] - if hasattr(self, 'pagination') and self.pagination is not None: - _dict['pagination'] = self.pagination._to_dict() + for k in [ + _k for _k in vars(self).keys() + if _k not in WorkspaceSystemSettings._properties + ]: + _dict[k] = getattr(self, k) return _dict - def __str__(self): - """Return a `str` version of this EntityMentionCollection object.""" - return json.dumps(self._to_dict(), indent=2) + def set_properties(self, _dict: dict): + """Set a dictionary of additional properties in this instance of WorkspaceSystemSettings""" + for k in [ + _k for _k in vars(self).keys() + if _k not in WorkspaceSystemSettings._properties + ]: + delattr(self, k) + for k, v in _dict.items(): + if k not in WorkspaceSystemSettings._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. 
+ format(k)) + + def __str__(self) -> str: + """Return a `str` version of this WorkspaceSystemSettings object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'WorkspaceSystemSettings') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'WorkspaceSystemSettings') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Example(object): +class WorkspaceSystemSettingsDisambiguation: """ - Example. + Workspace settings related to the disambiguation feature. - :attr str text: The text of a user input example. This string must conform to the - following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 1024 characters. - :attr list[Mention] mentions: (optional) An array of contextual entity mentions. - :attr datetime created: (optional) The timestamp for creation of the object. - :attr datetime updated: (optional) The timestamp for the most recent update to the - object. + :param str prompt: (optional) The text of the introductory prompt that + accompanies disambiguation options presented to the user. + :param str none_of_the_above_prompt: (optional) The user-facing label for the + option users can select if none of the suggested options is correct. If no value + is specified for this property, this option does not appear. + :param bool enabled: (optional) Whether the disambiguation feature is enabled + for the workspace. + :param str sensitivity: (optional) The sensitivity of the disambiguation feature + to intent detection uncertainty. Higher sensitivity means that the + disambiguation feature is triggered more often and includes more choices. + :param bool randomize: (optional) Whether the order in which disambiguation + suggestions are presented should be randomized (but still influenced by relative + confidence). + :param int max_suggestions: (optional) The maximum number of disambiguation + suggestions that can be included in a `suggestion` response. + :param str suggestion_text_policy: (optional) For internal use only. """ - def __init__(self, text, mentions=None, created=None, updated=None): + def __init__( + self, + *, + prompt: Optional[str] = None, + none_of_the_above_prompt: Optional[str] = None, + enabled: Optional[bool] = None, + sensitivity: Optional[str] = None, + randomize: Optional[bool] = None, + max_suggestions: Optional[int] = None, + suggestion_text_policy: Optional[str] = None, + ) -> None: """ - Initialize a Example object. + Initialize a WorkspaceSystemSettingsDisambiguation object. - :param str text: The text of a user input example. This string must conform to the - following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 1024 characters. - :param list[Mention] mentions: (optional) An array of contextual entity mentions. - :param datetime created: (optional) The timestamp for creation of the object. - :param datetime updated: (optional) The timestamp for the most recent update to - the object. + :param str prompt: (optional) The text of the introductory prompt that + accompanies disambiguation options presented to the user. 
+ :param str none_of_the_above_prompt: (optional) The user-facing label for + the option users can select if none of the suggested options is correct. If + no value is specified for this property, this option does not appear. + :param bool enabled: (optional) Whether the disambiguation feature is + enabled for the workspace. + :param str sensitivity: (optional) The sensitivity of the disambiguation + feature to intent detection uncertainty. Higher sensitivity means that the + disambiguation feature is triggered more often and includes more choices. + :param bool randomize: (optional) Whether the order in which disambiguation + suggestions are presented should be randomized (but still influenced by + relative confidence). + :param int max_suggestions: (optional) The maximum number of disambiguation + suggestions that can be included in a `suggestion` response. + :param str suggestion_text_policy: (optional) For internal use only. """ - self.text = text - self.mentions = mentions - self.created = created - self.updated = updated + self.prompt = prompt + self.none_of_the_above_prompt = none_of_the_above_prompt + self.enabled = enabled + self.sensitivity = sensitivity + self.randomize = randomize + self.max_suggestions = max_suggestions + self.suggestion_text_policy = suggestion_text_policy @classmethod - def _from_dict(cls, _dict): - """Initialize a Example object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'WorkspaceSystemSettingsDisambiguation': + """Initialize a WorkspaceSystemSettingsDisambiguation object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - else: - raise ValueError( - 'Required property \'text\' not present in Example JSON') - if 'mentions' in _dict: - args['mentions'] = [ - Mention._from_dict(x) for x in (_dict.get('mentions')) - ] - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) + if (prompt := _dict.get('prompt')) is not None: + args['prompt'] = prompt + if (none_of_the_above_prompt := + _dict.get('none_of_the_above_prompt')) is not None: + args['none_of_the_above_prompt'] = none_of_the_above_prompt + if (enabled := _dict.get('enabled')) is not None: + args['enabled'] = enabled + if (sensitivity := _dict.get('sensitivity')) is not None: + args['sensitivity'] = sensitivity + if (randomize := _dict.get('randomize')) is not None: + args['randomize'] = randomize + if (max_suggestions := _dict.get('max_suggestions')) is not None: + args['max_suggestions'] = max_suggestions + if (suggestion_text_policy := + _dict.get('suggestion_text_policy')) is not None: + args['suggestion_text_policy'] = suggestion_text_policy return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a WorkspaceSystemSettingsDisambiguation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'mentions') and self.mentions is not None: - _dict['mentions'] = [x._to_dict() for x in self.mentions] - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) + if hasattr(self, 'prompt') and self.prompt is not None: + 
_dict['prompt'] = self.prompt + if hasattr(self, 'none_of_the_above_prompt' + ) and self.none_of_the_above_prompt is not None: + _dict['none_of_the_above_prompt'] = self.none_of_the_above_prompt + if hasattr(self, 'enabled') and self.enabled is not None: + _dict['enabled'] = self.enabled + if hasattr(self, 'sensitivity') and self.sensitivity is not None: + _dict['sensitivity'] = self.sensitivity + if hasattr(self, 'randomize') and self.randomize is not None: + _dict['randomize'] = self.randomize + if hasattr(self, + 'max_suggestions') and self.max_suggestions is not None: + _dict['max_suggestions'] = self.max_suggestions + if hasattr(self, 'suggestion_text_policy' + ) and self.suggestion_text_policy is not None: + _dict['suggestion_text_policy'] = self.suggestion_text_policy return _dict - def __str__(self): - """Return a `str` version of this Example object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this WorkspaceSystemSettingsDisambiguation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'WorkspaceSystemSettingsDisambiguation') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'WorkspaceSystemSettingsDisambiguation') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class SensitivityEnum(str, Enum): + """ + The sensitivity of the disambiguation feature to intent detection uncertainty. + Higher sensitivity means that the disambiguation feature is triggered more often + and includes more choices. + """ + + AUTO = 'auto' + HIGH = 'high' + MEDIUM_HIGH = 'medium_high' + MEDIUM = 'medium' + MEDIUM_LOW = 'medium_low' + LOW = 'low' -class ExampleCollection(object): - """ - ExampleCollection. - :attr list[Example] examples: An array of objects describing the examples defined for - the intent. - :attr Pagination pagination: The pagination data for the returned objects. +class WorkspaceSystemSettingsNlp: + """ + Workspace settings related to the version of the training algorithms currently used by + the skill. + + :param str model: (optional) The policy the skill follows for selecting the + algorithm version to use. For more information, see the + [documentation](/docs/watson-assistant?topic=watson-assistant-algorithm-version). + On IBM Cloud, you can specify `latest`, `previous`, or `beta`. + On IBM Cloud Pak for Data, you can specify either `beta` or the date of the + version you want to use, in `YYYY-MM-DD` format. """ - def __init__(self, examples, pagination): + def __init__( + self, + *, + model: Optional[str] = None, + ) -> None: """ - Initialize a ExampleCollection object. + Initialize a WorkspaceSystemSettingsNlp object. - :param list[Example] examples: An array of objects describing the examples defined - for the intent. - :param Pagination pagination: The pagination data for the returned objects. + :param str model: (optional) The policy the skill follows for selecting the + algorithm version to use. For more information, see the + [documentation](/docs/watson-assistant?topic=watson-assistant-algorithm-version). + On IBM Cloud, you can specify `latest`, `previous`, or `beta`. 
+ On IBM Cloud Pak for Data, you can specify either `beta` or the date of + the version you want to use, in `YYYY-MM-DD` format. """ - self.examples = examples - self.pagination = pagination + self.model = model @classmethod - def _from_dict(cls, _dict): - """Initialize a ExampleCollection object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'WorkspaceSystemSettingsNlp': + """Initialize a WorkspaceSystemSettingsNlp object from a json dictionary.""" args = {} - if 'examples' in _dict: - args['examples'] = [ - Example._from_dict(x) for x in (_dict.get('examples')) - ] - else: - raise ValueError( - 'Required property \'examples\' not present in ExampleCollection JSON' - ) - if 'pagination' in _dict: - args['pagination'] = Pagination._from_dict(_dict.get('pagination')) - else: - raise ValueError( - 'Required property \'pagination\' not present in ExampleCollection JSON' - ) + if (model := _dict.get('model')) is not None: + args['model'] = model return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a WorkspaceSystemSettingsNlp object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'examples') and self.examples is not None: - _dict['examples'] = [x._to_dict() for x in self.examples] - if hasattr(self, 'pagination') and self.pagination is not None: - _dict['pagination'] = self.pagination._to_dict() + if hasattr(self, 'model') and self.model is not None: + _dict['model'] = self.model return _dict - def __str__(self): - """Return a `str` version of this ExampleCollection object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this WorkspaceSystemSettingsNlp object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'WorkspaceSystemSettingsNlp') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'WorkspaceSystemSettingsNlp') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Intent(object): +class WorkspaceSystemSettingsOffTopic: + """ + Workspace settings related to detection of irrelevant input. + + :param bool enabled: (optional) Whether enhanced irrelevance detection is + enabled for the workspace. """ - Intent. - :attr str intent: The name of the intent. This string must conform to the following - restrictions: - - It can contain only Unicode alphanumeric, underscore, hyphen, and dot characters. - - It cannot begin with the reserved prefix `sys-`. - - It must be no longer than 128 characters. - :attr str description: (optional) The description of the intent. This string cannot - contain carriage return, newline, or tab characters, and it must be no longer than 128 - characters. - :attr datetime created: (optional) The timestamp for creation of the object. - :attr datetime updated: (optional) The timestamp for the most recent update to the - object. - :attr list[Example] examples: (optional) An array of user input examples for the - intent. 
- """ - - def __init__(self, - intent, - description=None, - created=None, - updated=None, - examples=None): + def __init__( + self, + *, + enabled: Optional[bool] = None, + ) -> None: """ - Initialize a Intent object. + Initialize a WorkspaceSystemSettingsOffTopic object. - :param str intent: The name of the intent. This string must conform to the - following restrictions: - - It can contain only Unicode alphanumeric, underscore, hyphen, and dot - characters. - - It cannot begin with the reserved prefix `sys-`. - - It must be no longer than 128 characters. - :param str description: (optional) The description of the intent. This string - cannot contain carriage return, newline, or tab characters, and it must be no - longer than 128 characters. - :param datetime created: (optional) The timestamp for creation of the object. - :param datetime updated: (optional) The timestamp for the most recent update to - the object. - :param list[Example] examples: (optional) An array of user input examples for the - intent. + :param bool enabled: (optional) Whether enhanced irrelevance detection is + enabled for the workspace. """ - self.intent = intent - self.description = description - self.created = created - self.updated = updated - self.examples = examples + self.enabled = enabled @classmethod - def _from_dict(cls, _dict): - """Initialize a Intent object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'WorkspaceSystemSettingsOffTopic': + """Initialize a WorkspaceSystemSettingsOffTopic object from a json dictionary.""" args = {} - if 'intent' in _dict: - args['intent'] = _dict.get('intent') - else: - raise ValueError( - 'Required property \'intent\' not present in Intent JSON') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) - if 'examples' in _dict: - args['examples'] = [ - Example._from_dict(x) for x in (_dict.get('examples')) - ] + if (enabled := _dict.get('enabled')) is not None: + args['enabled'] = enabled return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a WorkspaceSystemSettingsOffTopic object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'intent') and self.intent is not None: - _dict['intent'] = self.intent - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) - if hasattr(self, 'examples') and self.examples is not None: - _dict['examples'] = [x._to_dict() for x in self.examples] + if hasattr(self, 'enabled') and self.enabled is not None: + _dict['enabled'] = self.enabled return _dict - def __str__(self): - """Return a `str` version of this Intent object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this WorkspaceSystemSettingsOffTopic object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 
'WorkspaceSystemSettingsOffTopic') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'WorkspaceSystemSettingsOffTopic') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class IntentCollection(object): +class WorkspaceSystemSettingsSystemEntities: """ - IntentCollection. + Workspace settings related to the behavior of system entities. - :attr list[Intent] intents: An array of objects describing the intents defined for the - workspace. - :attr Pagination pagination: The pagination data for the returned objects. + :param bool enabled: (optional) Whether the new system entities are enabled for + the workspace. """ - def __init__(self, intents, pagination): + def __init__( + self, + *, + enabled: Optional[bool] = None, + ) -> None: """ - Initialize a IntentCollection object. + Initialize a WorkspaceSystemSettingsSystemEntities object. - :param list[Intent] intents: An array of objects describing the intents defined - for the workspace. - :param Pagination pagination: The pagination data for the returned objects. + :param bool enabled: (optional) Whether the new system entities are enabled + for the workspace. """ - self.intents = intents - self.pagination = pagination + self.enabled = enabled @classmethod - def _from_dict(cls, _dict): - """Initialize a IntentCollection object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'WorkspaceSystemSettingsSystemEntities': + """Initialize a WorkspaceSystemSettingsSystemEntities object from a json dictionary.""" args = {} - if 'intents' in _dict: - args['intents'] = [ - Intent._from_dict(x) for x in (_dict.get('intents')) - ] - else: - raise ValueError( - 'Required property \'intents\' not present in IntentCollection JSON' - ) - if 'pagination' in _dict: - args['pagination'] = Pagination._from_dict(_dict.get('pagination')) - else: - raise ValueError( - 'Required property \'pagination\' not present in IntentCollection JSON' - ) + if (enabled := _dict.get('enabled')) is not None: + args['enabled'] = enabled return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a WorkspaceSystemSettingsSystemEntities object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'intents') and self.intents is not None: - _dict['intents'] = [x._to_dict() for x in self.intents] - if hasattr(self, 'pagination') and self.pagination is not None: - _dict['pagination'] = self.pagination._to_dict() + if hasattr(self, 'enabled') and self.enabled is not None: + _dict['enabled'] = self.enabled return _dict - def __str__(self): - """Return a `str` version of this IntentCollection object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this WorkspaceSystemSettingsSystemEntities object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'WorkspaceSystemSettingsSystemEntities') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, 
other): + def __ne__(self, other: 'WorkspaceSystemSettingsSystemEntities') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Log(object): +class WorkspaceSystemSettingsTooling: """ - Log. + Workspace settings related to the Watson Assistant user interface. - :attr MessageRequest request: A request sent to the workspace, including the user - input and context. - :attr MessageResponse response: The response sent by the workspace, including the - output text, detected intents and entities, and context. - :attr str log_id: A unique identifier for the logged event. - :attr str request_timestamp: The timestamp for receipt of the message. - :attr str response_timestamp: The timestamp for the system response to the message. - :attr str workspace_id: The unique identifier of the workspace where the request was - made. - :attr str language: The language of the workspace where the message request was made. + :param bool store_generic_responses: (optional) Whether the dialog JSON editor + displays text responses within the `output.generic` object. """ - def __init__(self, request, response, log_id, request_timestamp, - response_timestamp, workspace_id, language): + def __init__( + self, + *, + store_generic_responses: Optional[bool] = None, + ) -> None: """ - Initialize a Log object. + Initialize a WorkspaceSystemSettingsTooling object. - :param MessageRequest request: A request sent to the workspace, including the user - input and context. - :param MessageResponse response: The response sent by the workspace, including the - output text, detected intents and entities, and context. - :param str log_id: A unique identifier for the logged event. - :param str request_timestamp: The timestamp for receipt of the message. - :param str response_timestamp: The timestamp for the system response to the - message. - :param str workspace_id: The unique identifier of the workspace where the request - was made. - :param str language: The language of the workspace where the message request was - made. + :param bool store_generic_responses: (optional) Whether the dialog JSON + editor displays text responses within the `output.generic` object. 
""" - self.request = request - self.response = response - self.log_id = log_id - self.request_timestamp = request_timestamp - self.response_timestamp = response_timestamp - self.workspace_id = workspace_id - self.language = language + self.store_generic_responses = store_generic_responses - @classmethod - def _from_dict(cls, _dict): - """Initialize a Log object from a json dictionary.""" - args = {} - if 'request' in _dict: - args['request'] = MessageRequest._from_dict(_dict.get('request')) - else: - raise ValueError( - 'Required property \'request\' not present in Log JSON') - if 'response' in _dict: - args['response'] = MessageResponse._from_dict(_dict.get('response')) - else: - raise ValueError( - 'Required property \'response\' not present in Log JSON') - if 'log_id' in _dict: - args['log_id'] = _dict.get('log_id') - else: - raise ValueError( - 'Required property \'log_id\' not present in Log JSON') - if 'request_timestamp' in _dict: - args['request_timestamp'] = _dict.get('request_timestamp') - else: - raise ValueError( - 'Required property \'request_timestamp\' not present in Log JSON' - ) - if 'response_timestamp' in _dict: - args['response_timestamp'] = _dict.get('response_timestamp') - else: - raise ValueError( - 'Required property \'response_timestamp\' not present in Log JSON' - ) - if 'workspace_id' in _dict: - args['workspace_id'] = _dict.get('workspace_id') - else: - raise ValueError( - 'Required property \'workspace_id\' not present in Log JSON') - if 'language' in _dict: - args['language'] = _dict.get('language') - else: - raise ValueError( - 'Required property \'language\' not present in Log JSON') + @classmethod + def from_dict(cls, _dict: Dict) -> 'WorkspaceSystemSettingsTooling': + """Initialize a WorkspaceSystemSettingsTooling object from a json dictionary.""" + args = {} + if (store_generic_responses := + _dict.get('store_generic_responses')) is not None: + args['store_generic_responses'] = store_generic_responses return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a WorkspaceSystemSettingsTooling object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'request') and self.request is not None: - _dict['request'] = self.request._to_dict() - if hasattr(self, 'response') and self.response is not None: - _dict['response'] = self.response._to_dict() - if hasattr(self, 'log_id') and self.log_id is not None: - _dict['log_id'] = self.log_id - if hasattr(self, - 'request_timestamp') and self.request_timestamp is not None: - _dict['request_timestamp'] = self.request_timestamp - if hasattr( - self, - 'response_timestamp') and self.response_timestamp is not None: - _dict['response_timestamp'] = self.response_timestamp - if hasattr(self, 'workspace_id') and self.workspace_id is not None: - _dict['workspace_id'] = self.workspace_id - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language + if hasattr(self, 'store_generic_responses' + ) and self.store_generic_responses is not None: + _dict['store_generic_responses'] = self.store_generic_responses return _dict - def __str__(self): - """Return a `str` version of this Log object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this 
WorkspaceSystemSettingsTooling object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'WorkspaceSystemSettingsTooling') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'WorkspaceSystemSettingsTooling') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class LogCollection(object): +class DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio( + DialogNodeOutputGeneric): """ - LogCollection. - - :attr list[Log] logs: An array of objects describing log events. - :attr LogPagination pagination: The pagination data for the returned objects. + DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the audio clip. + :param str title: (optional) An optional title to show before the response. + :param str description: (optional) An optional description to show with the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. + :param dict channel_options: (optional) For internal use only. + :param str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the audio player cannot be seen. """ - def __init__(self, logs, pagination): - """ - Initialize a LogCollection object. - - :param list[Log] logs: An array of objects describing log events. - :param LogPagination pagination: The pagination data for the returned objects. - """ - self.logs = logs - self.pagination = pagination + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + channel_options: Optional[dict] = None, + alt_text: Optional[str] = None, + ) -> None: + """ + Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the audio clip. + :param str title: (optional) An optional title to show before the response. + :param str description: (optional) An optional description to show with the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + :param dict channel_options: (optional) For internal use only. + :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the audio player cannot be seen. 
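The three single-flag settings models above (off-topic detection, new system entities, and tooling) share the same shape; a brief sketch, assuming the ibm_watson.assistant_v1 import path:

from ibm_watson.assistant_v1 import (
    WorkspaceSystemSettingsOffTopic,
    WorkspaceSystemSettingsSystemEntities,
    WorkspaceSystemSettingsTooling,
)

# Each model wraps a single optional flag and serializes it only when set.
off_topic = WorkspaceSystemSettingsOffTopic(enabled=True)
system_entities = WorkspaceSystemSettingsSystemEntities(enabled=True)
tooling = WorkspaceSystemSettingsTooling(store_generic_responses=True)

print(off_topic.to_dict())        # {'enabled': True}
print(system_entities.to_dict())  # {'enabled': True}
print(tooling.to_dict())          # {'store_generic_responses': True}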
+ """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.channels = channels + self.channel_options = channel_options + self.alt_text = alt_text @classmethod - def _from_dict(cls, _dict): - """Initialize a LogCollection object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio': + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio object from a json dictionary.""" args = {} - if 'logs' in _dict: - args['logs'] = [Log._from_dict(x) for x in (_dict.get('logs'))] + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'logs\' not present in LogCollection JSON') - if 'pagination' in _dict: - args['pagination'] = LogPagination._from_dict( - _dict.get('pagination')) + 'Required property \'response_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio JSON' + ) + if (source := _dict.get('source')) is not None: + args['source'] = source else: raise ValueError( - 'Required property \'pagination\' not present in LogCollection JSON' + 'Required property \'source\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio JSON' ) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] + if (channel_options := _dict.get('channel_options')) is not None: + args['channel_options'] = channel_options + if (alt_text := _dict.get('alt_text')) is not None: + args['alt_text'] = alt_text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'logs') and self.logs is not None: - _dict['logs'] = [x._to_dict() for x in self.logs] - if hasattr(self, 'pagination') and self.pagination is not None: - _dict['pagination'] = self.pagination._to_dict() + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list + if hasattr(self, + 'channel_options') and self.channel_options is not None: + _dict['channel_options'] = self.channel_options + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict - def __str__(self): - """Return a `str` version of this LogCollection object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def 
__str__(self) -> str: + """Return a `str` version of this DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class LogMessage(object): +class DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer( + DialogNodeOutputGeneric): """ - Log message details. - - :attr str level: The severity of the log message. - :attr str msg: The text of the log message. + DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + **Note:** The `channel_transfer` response type is not supported on IBM Cloud + Pak for Data. + :param str message_to_user: The message to display to the user when initiating a + channel transfer. + :param ChannelTransferInfo transfer_info: Information used by an integration to + transfer the conversation to a different channel. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. """ - def __init__(self, level, msg, **kwargs): - """ - Initialize a LogMessage object. - - :param str level: The severity of the log message. - :param str msg: The text of the log message. - :param **kwargs: (optional) Any additional properties. - """ - self.level = level - self.msg = msg - for _key, _value in kwargs.items(): - setattr(self, _key, _value) + def __init__( + self, + response_type: str, + message_to_user: str, + transfer_info: 'ChannelTransferInfo', + *, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + **Note:** The `channel_transfer` response type is not supported on IBM + Cloud Pak for Data. + :param str message_to_user: The message to display to the user when + initiating a channel transfer. + :param ChannelTransferInfo transfer_info: Information used by an + integration to transfer the conversation to a different channel. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. 
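A short sketch of the audio response model completed above. The 'audio' literal for response_type and the import path are assumptions based on the subtype name, and the URL is a placeholder.

from ibm_watson.assistant_v1 import (
    DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio,
)

# response_type and source are required; the remaining fields are optional.
audio = DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio(
    response_type='audio',                      # assumed literal for this subtype
    source='https://example.com/clip.mp3',      # placeholder https: URL
    title='Welcome message',
    alt_text='Short spoken greeting',
)

# Serialize for the workspace JSON, then rebuild the model from that dictionary.
audio_json = audio.to_dict()
rebuilt = DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio.from_dict(audio_json)
print(rebuilt.source)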
+ """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.message_to_user = message_to_user + self.transfer_info = transfer_info + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a LogMessage object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer': + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer object from a json dictionary.""" args = {} - xtra = _dict.copy() - if 'level' in _dict: - args['level'] = _dict.get('level') - del xtra['level'] + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'level\' not present in LogMessage JSON') - if 'msg' in _dict: - args['msg'] = _dict.get('msg') - del xtra['msg'] + 'Required property \'response_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer JSON' + ) + if (message_to_user := _dict.get('message_to_user')) is not None: + args['message_to_user'] = message_to_user else: raise ValueError( - 'Required property \'msg\' not present in LogMessage JSON') - args.update(xtra) + 'Required property \'message_to_user\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer JSON' + ) + if (transfer_info := _dict.get('transfer_info')) is not None: + args['transfer_info'] = ChannelTransferInfo.from_dict(transfer_info) + else: + raise ValueError( + 'Required property \'transfer_info\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer JSON' + ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'level') and self.level is not None: - _dict['level'] = self.level - if hasattr(self, 'msg') and self.msg is not None: - _dict['msg'] = self.msg - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, + 'message_to_user') and self.message_to_user is not None: + _dict['message_to_user'] = self.message_to_user + if hasattr(self, 'transfer_info') and self.transfer_info is not None: + if isinstance(self.transfer_info, dict): + _dict['transfer_info'] = self.transfer_info + else: + _dict['transfer_info'] = self.transfer_info.to_dict() + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __setattr__(self, name, value): - properties = {'level', 'msg'} - if not hasattr(self, '_additionalProperties'): - super(LogMessage, self).__setattr__('_additionalProperties', set()) - if name not in properties: - self._additionalProperties.add(name) - super(LogMessage, self).__setattr__(name, value) + def _to_dict(self): + """Return a 
json dictionary representing this model.""" + return self.to_dict() - def __str__(self): - """Return a `str` version of this LogMessage object.""" - return json.dumps(self._to_dict(), indent=2) + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: + 'DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class LogPagination(object): +class DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent( + DialogNodeOutputGeneric): """ - The pagination data for the returned objects. - - :attr str next_url: (optional) The URL that will return the next page of results, if - any. - :attr int matched: (optional) Reserved for future use. - :attr str next_cursor: (optional) A token identifying the next page of results. + DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str message_to_human_agent: (optional) An optional message to be sent to + the human agent who will be taking over the conversation. + :param AgentAvailabilityMessage agent_available: (optional) An optional message + to be displayed to the user to indicate that the conversation will be + transferred to the next available agent. + :param AgentAvailabilityMessage agent_unavailable: (optional) An optional + message to be displayed to the user to indicate that no online agent is + available to take over the conversation. + :param DialogNodeOutputConnectToAgentTransferInfo transfer_info: (optional) + Routing or other contextual information to be used by target service desk + systems. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. """ - def __init__(self, next_url=None, matched=None, next_cursor=None): - """ - Initialize a LogPagination object. - - :param str next_url: (optional) The URL that will return the next page of results, - if any. - :param int matched: (optional) Reserved for future use. - :param str next_cursor: (optional) A token identifying the next page of results. - """ - self.next_url = next_url - self.matched = matched - self.next_cursor = next_cursor + def __init__( + self, + response_type: str, + *, + message_to_human_agent: Optional[str] = None, + agent_available: Optional['AgentAvailabilityMessage'] = None, + agent_unavailable: Optional['AgentAvailabilityMessage'] = None, + transfer_info: Optional[ + 'DialogNodeOutputConnectToAgentTransferInfo'] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. 
+ :param str message_to_human_agent: (optional) An optional message to be + sent to the human agent who will be taking over the conversation. + :param AgentAvailabilityMessage agent_available: (optional) An optional + message to be displayed to the user to indicate that the conversation will + be transferred to the next available agent. + :param AgentAvailabilityMessage agent_unavailable: (optional) An optional + message to be displayed to the user to indicate that no online agent is + available to take over the conversation. + :param DialogNodeOutputConnectToAgentTransferInfo transfer_info: (optional) + Routing or other contextual information to be used by target service desk + systems. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. + """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.message_to_human_agent = message_to_human_agent + self.agent_available = agent_available + self.agent_unavailable = agent_unavailable + self.transfer_info = transfer_info + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a LogPagination object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent': + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent object from a json dictionary.""" args = {} - if 'next_url' in _dict: - args['next_url'] = _dict.get('next_url') - if 'matched' in _dict: - args['matched'] = _dict.get('matched') - if 'next_cursor' in _dict: - args['next_cursor'] = _dict.get('next_cursor') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent JSON' + ) + if (message_to_human_agent := + _dict.get('message_to_human_agent')) is not None: + args['message_to_human_agent'] = message_to_human_agent + if (agent_available := _dict.get('agent_available')) is not None: + args['agent_available'] = AgentAvailabilityMessage.from_dict( + agent_available) + if (agent_unavailable := _dict.get('agent_unavailable')) is not None: + args['agent_unavailable'] = AgentAvailabilityMessage.from_dict( + agent_unavailable) + if (transfer_info := _dict.get('transfer_info')) is not None: + args[ + 'transfer_info'] = DialogNodeOutputConnectToAgentTransferInfo.from_dict( + transfer_info) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'next_url') and self.next_url is not None: - _dict['next_url'] = self.next_url - if hasattr(self, 'matched') and self.matched is not None: - _dict['matched'] = self.matched - if hasattr(self, 'next_cursor') and self.next_cursor is not None: - _dict['next_cursor'] = self.next_cursor + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'message_to_human_agent' + ) and self.message_to_human_agent is not None: + 
_dict['message_to_human_agent'] = self.message_to_human_agent + if hasattr(self, + 'agent_available') and self.agent_available is not None: + if isinstance(self.agent_available, dict): + _dict['agent_available'] = self.agent_available + else: + _dict['agent_available'] = self.agent_available.to_dict() + if hasattr(self, + 'agent_unavailable') and self.agent_unavailable is not None: + if isinstance(self.agent_unavailable, dict): + _dict['agent_unavailable'] = self.agent_unavailable + else: + _dict['agent_unavailable'] = self.agent_unavailable.to_dict() + if hasattr(self, 'transfer_info') and self.transfer_info is not None: + if isinstance(self.transfer_info, dict): + _dict['transfer_info'] = self.transfer_info + else: + _dict['transfer_info'] = self.transfer_info.to_dict() + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this LogPagination object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, + other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, + other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Mention(object): +class DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe( + DialogNodeOutputGeneric): """ - A mention of a contextual entity. - - :attr str entity: The name of the entity. - :attr list[int] location: An array of zero-based character offsets that indicate where - the entity mentions begin and end in the input text. + DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the embeddable content. + :param str title: (optional) An optional title to show before the response. + :param str description: (optional) An optional description to show with the + response. + :param str image_url: (optional) The URL of an image that shows a preview of the + embedded content. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, entity, location): - """ - Initialize a Mention object. - - :param str entity: The name of the entity. - :param list[int] location: An array of zero-based character offsets that indicate - where the entity mentions begin and end in the input text. 
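A sketch of the connect-to-agent response model completed above; only response_type is required, and the 'connect_to_agent' literal is assumed from the subtype name.

from ibm_watson.assistant_v1 import (
    DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent,
)

# Keyword-only optional fields; nested messages may also be passed as models.
connect = DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent(
    response_type='connect_to_agent',            # assumed literal for this subtype
    message_to_human_agent='User needs help with a billing question.',
)
print(connect.to_dict())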
- """ - self.entity = entity - self.location = location + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + image_url: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the embeddable content. + :param str title: (optional) An optional title to show before the response. + :param str description: (optional) An optional description to show with the + response. + :param str image_url: (optional) The URL of an image that shows a preview + of the embedded content. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.image_url = image_url + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a Mention object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe': + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe object from a json dictionary.""" args = {} - if 'entity' in _dict: - args['entity'] = _dict.get('entity') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'entity\' not present in Mention JSON') - if 'location' in _dict: - args['location'] = _dict.get('location') + 'Required property \'response_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe JSON' + ) + if (source := _dict.get('source')) is not None: + args['source'] = source else: raise ValueError( - 'Required property \'location\' not present in Mention JSON') + 'Required property \'source\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe JSON' + ) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (image_url := _dict.get('image_url')) is not None: + args['image_url'] = image_url + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'entity') and self.entity is not None: - _dict['entity'] = self.entity - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 
'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'image_url') and self.image_url is not None: + _dict['image_url'] = self.image_url + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this Mention object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class MessageContextMetadata(object): +class DialogNodeOutputGenericDialogNodeOutputResponseTypeImage( + DialogNodeOutputGeneric): """ - Metadata related to the message. - - :attr str deployment: (optional) A label identifying the deployment environment, used - for filtering log data. This string cannot contain carriage return, newline, or tab - characters. - :attr str user_id: (optional) A string value that identifies the user who is - interacting with the workspace. The client must provide a unique identifier for each - individual end user who accesses the application. For Plus and Premium plans, this - user ID is used to identify unique users for billing purposes. This string cannot - contain carriage return, newline, or tab characters. + DialogNodeOutputGenericDialogNodeOutputResponseTypeImage. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the image. + :param str title: (optional) An optional title to show before the response. + :param str description: (optional) An optional description to show with the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. + :param str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the image cannot be seen. """ - def __init__(self, deployment=None, user_id=None): - """ - Initialize a MessageContextMetadata object. - - :param str deployment: (optional) A label identifying the deployment environment, - used for filtering log data. This string cannot contain carriage return, newline, - or tab characters. - :param str user_id: (optional) A string value that identifies the user who is - interacting with the workspace. The client must provide a unique identifier for - each individual end user who accesses the application. For Plus and Premium plans, - this user ID is used to identify unique users for billing purposes. 
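A sketch of the iframe response model completed above, with an assumed 'iframe' literal and placeholder URLs.

from ibm_watson.assistant_v1 import (
    DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe,
)

iframe = DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe(
    response_type='iframe',                        # assumed literal for this subtype
    source='https://example.com/embed',            # required https: URL of the content
    image_url='https://example.com/preview.png',   # optional preview image
)
print(iframe.to_dict())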
This string - cannot contain carriage return, newline, or tab characters. + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + alt_text: Optional[str] = None, + ) -> None: + """ + Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeImage object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the image. + :param str title: (optional) An optional title to show before the response. + :param str description: (optional) An optional description to show with the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. + :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the image cannot be seen. """ - self.deployment = deployment - self.user_id = user_id + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.channels = channels + self.alt_text = alt_text @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageContextMetadata object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'DialogNodeOutputGenericDialogNodeOutputResponseTypeImage': + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeImage object from a json dictionary.""" args = {} - if 'deployment' in _dict: - args['deployment'] = _dict.get('deployment') - if 'user_id' in _dict: - args['user_id'] = _dict.get('user_id') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeImage JSON' + ) + if (source := _dict.get('source')) is not None: + args['source'] = source + else: + raise ValueError( + 'Required property \'source\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeImage JSON' + ) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] + if (alt_text := _dict.get('alt_text')) is not None: + args['alt_text'] = alt_text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeImage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'deployment') and self.deployment is not None: - _dict['deployment'] = self.deployment - if hasattr(self, 'user_id') and self.user_id is not None: - _dict['user_id'] = self.user_id + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is 
not None: + _dict['description'] = self.description + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict - def __str__(self): - """Return a `str` version of this MessageContextMetadata object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputGenericDialogNodeOutputResponseTypeImage object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeImage' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeImage' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class MessageInput(object): +class DialogNodeOutputGenericDialogNodeOutputResponseTypeOption( + DialogNodeOutputGeneric): """ - An input object that includes the input text. - - :attr str text: (optional) The text of the user input. This string cannot contain - carriage return, newline, or tab characters, and it must be no longer than 2048 - characters. + DialogNodeOutputGenericDialogNodeOutputResponseTypeOption. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str title: An optional title to show before the response. + :param str description: (optional) An optional description to show with the + response. + :param str preference: (optional) The preferred type of control to display, if + supported by the channel. + :param List[DialogNodeOutputOptionsElement] options: An array of objects + describing the options from which the user can choose. You can include up to 20 + options. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. """ - def __init__(self, text=None, **kwargs): + def __init__( + self, + response_type: str, + title: str, + options: List['DialogNodeOutputOptionsElement'], + *, + description: Optional[str] = None, + preference: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeOption object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str title: An optional title to show before the response. + :param List[DialogNodeOutputOptionsElement] options: An array of objects + describing the options from which the user can choose. You can include up + to 20 options. + :param str description: (optional) An optional description to show with the + response. + :param str preference: (optional) The preferred type of control to display, + if supported by the channel. 
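A sketch of the image response model completed above; the 'image' literal is assumed and the URL is a placeholder.

from ibm_watson.assistant_v1 import (
    DialogNodeOutputGenericDialogNodeOutputResponseTypeImage,
)

image = DialogNodeOutputGenericDialogNodeOutputResponseTypeImage(
    response_type='image',                       # assumed literal for this subtype
    source='https://example.com/photo.png',      # required https: URL of the image
    alt_text='Store front at night',             # recommended for screen readers
)

# to_dict() emits only the fields that are set.
print(image.to_dict())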
+ :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. """ - Initialize a MessageInput object. + # pylint: disable=super-init-not-called + self.response_type = response_type + self.title = title + self.description = description + self.preference = preference + self.options = options + self.channels = channels - :param str text: (optional) The text of the user input. This string cannot contain - carriage return, newline, or tab characters, and it must be no longer than 2048 - characters. - :param **kwargs: (optional) Any additional properties. - """ - self.text = text - for _key, _value in kwargs.items(): - setattr(self, _key, _value) + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'DialogNodeOutputGenericDialogNodeOutputResponseTypeOption': + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeOption object from a json dictionary.""" + args = {} + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeOption JSON' + ) + if (title := _dict.get('title')) is not None: + args['title'] = title + else: + raise ValueError( + 'Required property \'title\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeOption JSON' + ) + if (description := _dict.get('description')) is not None: + args['description'] = description + if (preference := _dict.get('preference')) is not None: + args['preference'] = preference + if (options := _dict.get('options')) is not None: + args['options'] = [ + DialogNodeOutputOptionsElement.from_dict(v) for v in options + ] + else: + raise ValueError( + 'Required property \'options\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeOption JSON' + ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] + return cls(**args) @classmethod def _from_dict(cls, _dict): - """Initialize a MessageInput object from a json dictionary.""" - args = {} - xtra = _dict.copy() - if 'text' in _dict: - args['text'] = _dict.get('text') - del xtra['text'] - args.update(xtra) - return cls(**args) + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeOption object from a json dictionary.""" + return cls.from_dict(_dict) - def _to_dict(self): + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'preference') and self.preference is not None: + _dict['preference'] = self.preference + if hasattr(self, 'options') and self.options is not None: + options_list = [] + for v in self.options: + if isinstance(v, dict): + options_list.append(v) + else: + options_list.append(v.to_dict()) + _dict['options'] = options_list + if hasattr(self, 'channels') and self.channels is not None: + 
channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __setattr__(self, name, value): - properties = {'text'} - if not hasattr(self, '_additionalProperties'): - super(MessageInput, self).__setattr__('_additionalProperties', - set()) - if name not in properties: - self._additionalProperties.add(name) - super(MessageInput, self).__setattr__(name, value) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __str__(self): - """Return a `str` version of this MessageInput object.""" - return json.dumps(self._to_dict(), indent=2) + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputGenericDialogNodeOutputResponseTypeOption object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeOption' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeOption' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class PreferenceEnum(str, Enum): + """ + The preferred type of control to display, if supported by the channel. + """ -class MessageRequest(object): - """ - A request sent to the workspace, including the user input and context. + DROPDOWN = 'dropdown' + BUTTON = 'button' - :attr MessageInput input: (optional) An input object that includes the input text. - :attr list[RuntimeIntent] intents: (optional) Intents to use when evaluating the user - input. Include intents from the previous response to continue using those intents - rather than trying to recognize intents in the new input. - :attr list[RuntimeEntity] entities: (optional) Entities to use when evaluating the - message. Include entities from the previous response to continue using those entities - rather than detecting entities in the new input. - :attr bool alternate_intents: (optional) Whether to return more than one intent. A - value of `true` indicates that all matching intents are returned. - :attr Context context: (optional) State information for the conversation. To maintain - state, include the context from the previous response. - :attr OutputData output: (optional) An output object that includes the response to the - user, the dialog nodes that were triggered, and messages from the log. - :attr list[DialogNodeAction] actions: (optional) An array of objects describing any - actions requested by the dialog node. - """ - - def __init__(self, - input=None, - intents=None, - entities=None, - alternate_intents=None, - context=None, - output=None, - actions=None): - """ - Initialize a MessageRequest object. - :param MessageInput input: (optional) An input object that includes the input - text. - :param list[RuntimeIntent] intents: (optional) Intents to use when evaluating the - user input. Include intents from the previous response to continue using those - intents rather than trying to recognize intents in the new input. - :param list[RuntimeEntity] entities: (optional) Entities to use when evaluating - the message. 
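A sketch of the option response model completed above. Options can be passed either as DialogNodeOutputOptionsElement models or as plain dictionaries (to_dict passes dictionaries through unchanged); the element shape used here is illustrative only, and the 'option' literal is assumed.

from ibm_watson.assistant_v1 import (
    DialogNodeOutputGenericDialogNodeOutputResponseTypeOption,
)

OptionResponse = DialogNodeOutputGenericDialogNodeOutputResponseTypeOption

option = OptionResponse(
    response_type='option',                      # assumed literal for this subtype
    title='Choose a topic',
    preference=OptionResponse.PreferenceEnum.BUTTON.value,
    options=[
        # Illustrative element shape; real payloads use DialogNodeOutputOptionsElement.
        {'label': 'Store hours', 'value': {'input': {'text': 'store hours'}}},
        {'label': 'Returns', 'value': {'input': {'text': 'returns'}}},
    ],
)
print(option.to_dict())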
Include entities from the previous response to continue using those - entities rather than detecting entities in the new input. - :param bool alternate_intents: (optional) Whether to return more than one intent. - A value of `true` indicates that all matching intents are returned. - :param Context context: (optional) State information for the conversation. To - maintain state, include the context from the previous response. - :param OutputData output: (optional) An output object that includes the response - to the user, the dialog nodes that were triggered, and messages from the log. - :param list[DialogNodeAction] actions: (optional) An array of objects describing - any actions requested by the dialog node. - """ - self.input = input - self.intents = intents - self.entities = entities - self.alternate_intents = alternate_intents - self.context = context - self.output = output - self.actions = actions +class DialogNodeOutputGenericDialogNodeOutputResponseTypePause( + DialogNodeOutputGeneric): + """ + DialogNodeOutputGenericDialogNodeOutputResponseTypePause. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param int time: How long to pause, in milliseconds. The valid values are from 0 + to 10000. + :param bool typing: (optional) Whether to send a "user is typing" event during + the pause. Ignored if the channel does not support this event. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. + """ + + def __init__( + self, + response_type: str, + time: int, + *, + typing: Optional[bool] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypePause object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param int time: How long to pause, in milliseconds. The valid values are + from 0 to 10000. + :param bool typing: (optional) Whether to send a "user is typing" event + during the pause. Ignored if the channel does not support this event. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. 
+ """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.time = time + self.typing = typing + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageRequest object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'DialogNodeOutputGenericDialogNodeOutputResponseTypePause': + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypePause object from a json dictionary.""" args = {} - if 'input' in _dict: - args['input'] = MessageInput._from_dict(_dict.get('input')) - if 'intents' in _dict: - args['intents'] = [ - RuntimeIntent._from_dict(x) for x in (_dict.get('intents')) - ] - if 'entities' in _dict: - args['entities'] = [ - RuntimeEntity._from_dict(x) for x in (_dict.get('entities')) - ] - if 'alternate_intents' in _dict: - args['alternate_intents'] = _dict.get('alternate_intents') - if 'context' in _dict: - args['context'] = Context._from_dict(_dict.get('context')) - if 'output' in _dict: - args['output'] = OutputData._from_dict(_dict.get('output')) - if 'actions' in _dict: - args['actions'] = [ - DialogNodeAction._from_dict(x) for x in (_dict.get('actions')) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypePause JSON' + ) + if (time := _dict.get('time')) is not None: + args['time'] = time + else: + raise ValueError( + 'Required property \'time\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypePause JSON' + ) + if (typing := _dict.get('typing')) is not None: + args['typing'] = typing + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypePause object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'input') and self.input is not None: - _dict['input'] = self.input._to_dict() - if hasattr(self, 'intents') and self.intents is not None: - _dict['intents'] = [x._to_dict() for x in self.intents] - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] - if hasattr(self, - 'alternate_intents') and self.alternate_intents is not None: - _dict['alternate_intents'] = self.alternate_intents - if hasattr(self, 'context') and self.context is not None: - _dict['context'] = self.context._to_dict() - if hasattr(self, 'output') and self.output is not None: - _dict['output'] = self.output._to_dict() - if hasattr(self, 'actions') and self.actions is not None: - _dict['actions'] = [x._to_dict() for x in self.actions] + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'time') and self.time is not None: + _dict['time'] = self.time + if hasattr(self, 'typing') and self.typing is not None: + _dict['typing'] = self.typing + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a 
`str` version of this MessageRequest object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputGenericDialogNodeOutputResponseTypePause object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypePause' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypePause' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class MessageResponse(object): +class DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill( + DialogNodeOutputGeneric): + """ + DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + **Note:** The **search_skill** response type is used only by the v2 runtime API. + :param str query: The text of the search query. This can be either a + natural-language query or a query that uses the Discovery query language syntax, + depending on the value of the **query_type** property. For more information, see + the [Discovery service + documentation](https://cloud.ibm.com/docs/discovery?topic=discovery-query-operators#query-operators). + :param str query_type: The type of the search query. + :param str filter: (optional) An optional filter that narrows the set of + documents to be searched. For more information, see the [Discovery service + documentation]([Discovery service + documentation](https://cloud.ibm.com/docs/discovery?topic=discovery-query-parameters#filter). + :param str discovery_version: (optional) The version of the Discovery service + API to use for the query. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. """ - The response sent by the workspace, including the output text, detected intents and - entities, and context. - - :attr MessageInput input: An input object that includes the input text. - :attr list[RuntimeIntent] intents: An array of intents recognized in the user input, - sorted in descending order of confidence. - :attr list[RuntimeEntity] entities: An array of entities identified in the user input. - :attr bool alternate_intents: (optional) Whether to return more than one intent. A - value of `true` indicates that all matching intents are returned. - :attr Context context: State information for the conversation. To maintain state, - include the context from the previous response. - :attr OutputData output: An output object that includes the response to the user, the - dialog nodes that were triggered, and messages from the log. - :attr list[DialogNodeAction] actions: (optional) An array of objects describing any - actions requested by the dialog node. - """ - - def __init__(self, - input, - intents, - entities, - context, - output, - alternate_intents=None, - actions=None): - """ - Initialize a MessageResponse object. - :param MessageInput input: An input object that includes the input text. 
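The pause variant's constructor, shown above, takes two required positional fields and keyword-only optionals after the bare `*`; a small sketch (module path assumed) of the minimal call and the resulting dictionary:

from ibm_watson.assistant_v1 import DialogNodeOutputGenericDialogNodeOutputResponseTypePause

pause = DialogNodeOutputGenericDialogNodeOutputResponseTypePause(
    response_type='pause',
    time=3000,      # milliseconds, valid range 0-10000
    typing=True,    # optional, keyword-only
)
print(pause.to_dict())  # {'response_type': 'pause', 'time': 3000, 'typing': True}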
- :param list[RuntimeIntent] intents: An array of intents recognized in the user - input, sorted in descending order of confidence. - :param list[RuntimeEntity] entities: An array of entities identified in the user - input. - :param Context context: State information for the conversation. To maintain state, - include the context from the previous response. - :param OutputData output: An output object that includes the response to the user, - the dialog nodes that were triggered, and messages from the log. - :param bool alternate_intents: (optional) Whether to return more than one intent. - A value of `true` indicates that all matching intents are returned. - :param list[DialogNodeAction] actions: (optional) An array of objects describing - any actions requested by the dialog node. - """ - self.input = input - self.intents = intents - self.entities = entities - self.alternate_intents = alternate_intents - self.context = context - self.output = output - self.actions = actions + def __init__( + self, + response_type: str, + query: str, + query_type: str, + *, + filter: Optional[str] = None, + discovery_version: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + **Note:** The **search_skill** response type is used only by the v2 runtime + API. + :param str query: The text of the search query. This can be either a + natural-language query or a query that uses the Discovery query language + syntax, depending on the value of the **query_type** property. For more + information, see the [Discovery service + documentation](https://cloud.ibm.com/docs/discovery?topic=discovery-query-operators#query-operators). + :param str query_type: The type of the search query. + :param str filter: (optional) An optional filter that narrows the set of + documents to be searched. For more information, see the [Discovery service + documentation]([Discovery service + documentation](https://cloud.ibm.com/docs/discovery?topic=discovery-query-parameters#filter). + :param str discovery_version: (optional) The version of the Discovery + service API to use for the query. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. 
+ """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.query = query + self.query_type = query_type + self.filter = filter + self.discovery_version = discovery_version + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageResponse object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill': + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill object from a json dictionary.""" args = {} - if 'input' in _dict: - args['input'] = MessageInput._from_dict(_dict.get('input')) - else: - raise ValueError( - 'Required property \'input\' not present in MessageResponse JSON' - ) - if 'intents' in _dict: - args['intents'] = [ - RuntimeIntent._from_dict(x) for x in (_dict.get('intents')) - ] - else: - raise ValueError( - 'Required property \'intents\' not present in MessageResponse JSON' - ) - if 'entities' in _dict: - args['entities'] = [ - RuntimeEntity._from_dict(x) for x in (_dict.get('entities')) - ] + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'entities\' not present in MessageResponse JSON' + 'Required property \'response_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill JSON' ) - if 'alternate_intents' in _dict: - args['alternate_intents'] = _dict.get('alternate_intents') - if 'context' in _dict: - args['context'] = Context._from_dict(_dict.get('context')) + if (query := _dict.get('query')) is not None: + args['query'] = query else: raise ValueError( - 'Required property \'context\' not present in MessageResponse JSON' + 'Required property \'query\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill JSON' ) - if 'output' in _dict: - args['output'] = OutputData._from_dict(_dict.get('output')) + if (query_type := _dict.get('query_type')) is not None: + args['query_type'] = query_type else: raise ValueError( - 'Required property \'output\' not present in MessageResponse JSON' + 'Required property \'query_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill JSON' ) - if 'actions' in _dict: - args['actions'] = [ - DialogNodeAction._from_dict(x) for x in (_dict.get('actions')) + if (filter := _dict.get('filter')) is not None: + args['filter'] = filter + if (discovery_version := _dict.get('discovery_version')) is not None: + args['discovery_version'] = discovery_version + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'input') and self.input is not None: - _dict['input'] = self.input._to_dict() - if hasattr(self, 'intents') and self.intents is not None: - _dict['intents'] = [x._to_dict() for x in self.intents] - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'query') and self.query is not 
None: + _dict['query'] = self.query + if hasattr(self, 'query_type') and self.query_type is not None: + _dict['query_type'] = self.query_type + if hasattr(self, 'filter') and self.filter is not None: + _dict['filter'] = self.filter if hasattr(self, - 'alternate_intents') and self.alternate_intents is not None: - _dict['alternate_intents'] = self.alternate_intents - if hasattr(self, 'context') and self.context is not None: - _dict['context'] = self.context._to_dict() - if hasattr(self, 'output') and self.output is not None: - _dict['output'] = self.output._to_dict() - if hasattr(self, 'actions') and self.actions is not None: - _dict['actions'] = [x._to_dict() for x in self.actions] + 'discovery_version') and self.discovery_version is not None: + _dict['discovery_version'] = self.discovery_version + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this MessageResponse object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, + other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class QueryTypeEnum(str, Enum): + """ + The type of the search query. + """ -class OutputData(object): - """ - An output object that includes the response to the user, the dialog nodes that were - triggered, and messages from the log. + NATURAL_LANGUAGE = 'natural_language' + DISCOVERY_QUERY_LANGUAGE = 'discovery_query_language' - :attr list[LogMessage] log_messages: An array of up to 50 messages logged with the - request. - :attr list[str] text: An array of responses to the user. - :attr list[DialogRuntimeResponseGeneric] generic: (optional) Output intended for any - channel. It is the responsibility of the client application to implement the supported - response types. - :attr list[str] nodes_visited: (optional) An array of the nodes that were triggered to - create the response, in the order in which they were visited. This information is - useful for debugging and for tracing the path taken through the node tree. - :attr list[DialogNodeVisitedDetails] nodes_visited_details: (optional) An array of - objects containing detailed diagnostic information about the nodes that were triggered - during processing of the input message. Included only if **nodes_visited_details** is - set to `true` in the message request. - """ - - def __init__(self, - log_messages, - text, - generic=None, - nodes_visited=None, - nodes_visited_details=None, - **kwargs): - """ - Initialize a OutputData object. - :param list[LogMessage] log_messages: An array of up to 50 messages logged with - the request. 
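For the search_skill variant above, the nested QueryTypeEnum supplies the query_type value; this sketch (import path assumed, filter value purely illustrative) uses the Discovery query language together with the optional filter:

from ibm_watson.assistant_v1 import DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill

search = DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill(
    response_type='search_skill',
    query='enriched_text.entities.text:"IBM"',
    query_type=DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill.QueryTypeEnum.DISCOVERY_QUERY_LANGUAGE.value,
    filter='extracted_metadata.file_type::pdf',  # optional Discovery filter (illustrative value)
)
print(search.to_dict()['query_type'])  # discovery_query_language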
- :param list[str] text: An array of responses to the user. - :param list[DialogRuntimeResponseGeneric] generic: (optional) Output intended for - any channel. It is the responsibility of the client application to implement the - supported response types. - :param list[str] nodes_visited: (optional) An array of the nodes that were - triggered to create the response, in the order in which they were visited. This - information is useful for debugging and for tracing the path taken through the - node tree. - :param list[DialogNodeVisitedDetails] nodes_visited_details: (optional) An array - of objects containing detailed diagnostic information about the nodes that were - triggered during processing of the input message. Included only if - **nodes_visited_details** is set to `true` in the message request. - :param **kwargs: (optional) Any additional properties. - """ - self.log_messages = log_messages - self.text = text - self.generic = generic - self.nodes_visited = nodes_visited - self.nodes_visited_details = nodes_visited_details - for _key, _value in kwargs.items(): - setattr(self, _key, _value) +class DialogNodeOutputGenericDialogNodeOutputResponseTypeText( + DialogNodeOutputGeneric): + """ + DialogNodeOutputGenericDialogNodeOutputResponseTypeText. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param List[DialogNodeOutputTextValuesElement] values: A list of one or more + objects defining text responses. + :param str selection_policy: (optional) How a response is selected from the + list, if more than one response is specified. + :param str delimiter: (optional) The delimiter to use as a separator between + responses when `selection_policy`=`multiline`. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. + """ + + def __init__( + self, + response_type: str, + values: List['DialogNodeOutputTextValuesElement'], + *, + selection_policy: Optional[str] = None, + delimiter: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeText object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param List[DialogNodeOutputTextValuesElement] values: A list of one or + more objects defining text responses. + :param str selection_policy: (optional) How a response is selected from the + list, if more than one response is specified. + :param str delimiter: (optional) The delimiter to use as a separator + between responses when `selection_policy`=`multiline`. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. 
+ """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.values = values + self.selection_policy = selection_policy + self.delimiter = delimiter + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a OutputData object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'DialogNodeOutputGenericDialogNodeOutputResponseTypeText': + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeText object from a json dictionary.""" args = {} - xtra = _dict.copy() - if 'log_messages' in _dict: - args['log_messages'] = [ - LogMessage._from_dict(x) for x in (_dict.get('log_messages')) - ] - del xtra['log_messages'] + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'log_messages\' not present in OutputData JSON' + 'Required property \'response_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeText JSON' ) - if 'text' in _dict: - args['text'] = _dict.get('text') - del xtra['text'] + if (values := _dict.get('values')) is not None: + args['values'] = [ + DialogNodeOutputTextValuesElement.from_dict(v) for v in values + ] else: raise ValueError( - 'Required property \'text\' not present in OutputData JSON') - if 'generic' in _dict: - args['generic'] = [ - DialogRuntimeResponseGeneric._from_dict(x) - for x in (_dict.get('generic')) - ] - del xtra['generic'] - if 'nodes_visited' in _dict: - args['nodes_visited'] = _dict.get('nodes_visited') - del xtra['nodes_visited'] - if 'nodes_visited_details' in _dict: - args['nodes_visited_details'] = [ - DialogNodeVisitedDetails._from_dict(x) - for x in (_dict.get('nodes_visited_details')) + 'Required property \'values\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeText JSON' + ) + if (selection_policy := _dict.get('selection_policy')) is not None: + args['selection_policy'] = selection_policy + if (delimiter := _dict.get('delimiter')) is not None: + args['delimiter'] = delimiter + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels ] - del xtra['nodes_visited_details'] - args.update(xtra) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeText object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'log_messages') and self.log_messages is not None: - _dict['log_messages'] = [x._to_dict() for x in self.log_messages] - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'generic') and self.generic is not None: - _dict['generic'] = [x._to_dict() for x in self.generic] - if hasattr(self, 'nodes_visited') and self.nodes_visited is not None: - _dict['nodes_visited'] = self.nodes_visited - if hasattr(self, 'nodes_visited_details' - ) and self.nodes_visited_details is not None: - _dict['nodes_visited_details'] = [ - x._to_dict() for x in self.nodes_visited_details - ] - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'values') and 
self.values is not None: + values_list = [] + for v in self.values: + if isinstance(v, dict): + values_list.append(v) + else: + values_list.append(v.to_dict()) + _dict['values'] = values_list + if hasattr(self, + 'selection_policy') and self.selection_policy is not None: + _dict['selection_policy'] = self.selection_policy + if hasattr(self, 'delimiter') and self.delimiter is not None: + _dict['delimiter'] = self.delimiter + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __setattr__(self, name, value): - properties = { - 'log_messages', 'text', 'generic', 'nodes_visited', - 'nodes_visited_details' - } - if not hasattr(self, '_additionalProperties'): - super(OutputData, self).__setattr__('_additionalProperties', set()) - if name not in properties: - self._additionalProperties.add(name) - super(OutputData, self).__setattr__(name, value) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __str__(self): - """Return a `str` version of this OutputData object.""" - return json.dumps(self._to_dict(), indent=2) + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputGenericDialogNodeOutputResponseTypeText object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeText' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeText' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class SelectionPolicyEnum(str, Enum): + """ + How a response is selected from the list, if more than one response is specified. + """ -class Pagination(object): - """ - The pagination data for the returned objects. + SEQUENTIAL = 'sequential' + RANDOM = 'random' + MULTILINE = 'multiline' - :attr str refresh_url: The URL that will return the same page of results. - :attr str next_url: (optional) The URL that will return the next page of results. - :attr int total: (optional) Reserved for future use. - :attr int matched: (optional) Reserved for future use. - :attr str refresh_cursor: (optional) A token identifying the current page of results. - :attr str next_cursor: (optional) A token identifying the next page of results. - """ - def __init__(self, - refresh_url, - next_url=None, - total=None, - matched=None, - refresh_cursor=None, - next_cursor=None): - """ - Initialize a Pagination object. +class DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined( + DialogNodeOutputGeneric): + """ + DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param dict user_defined: An object containing any properties for the + user-defined response type. The total size of this object cannot exceed 5000 + bytes. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. 
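A sketch of the text response type above, assuming the module path and that DialogNodeOutputTextValuesElement accepts a simple {'text': ...} payload; selection_policy comes from the nested SelectionPolicyEnum:

from ibm_watson.assistant_v1 import DialogNodeOutputGenericDialogNodeOutputResponseTypeText

text_json = {
    'response_type': 'text',
    'values': [{'text': 'Hello.'}, {'text': 'Hi there!'}],  # assumed element shape
    'selection_policy': DialogNodeOutputGenericDialogNodeOutputResponseTypeText.SelectionPolicyEnum.RANDOM.value,
}
text_response = DialogNodeOutputGenericDialogNodeOutputResponseTypeText.from_dict(text_json)
print(len(text_response.values))                    # 2
print(text_response.to_dict()['selection_policy'])  # random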
+ """ - :param str refresh_url: The URL that will return the same page of results. - :param str next_url: (optional) The URL that will return the next page of results. - :param int total: (optional) Reserved for future use. - :param int matched: (optional) Reserved for future use. - :param str refresh_cursor: (optional) A token identifying the current page of - results. - :param str next_cursor: (optional) A token identifying the next page of results. - """ - self.refresh_url = refresh_url - self.next_url = next_url - self.total = total - self.matched = matched - self.refresh_cursor = refresh_cursor - self.next_cursor = next_cursor + def __init__( + self, + response_type: str, + user_defined: dict, + *, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param dict user_defined: An object containing any properties for the + user-defined response type. The total size of this object cannot exceed + 5000 bytes. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. + """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.user_defined = user_defined + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a Pagination object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined': + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined object from a json dictionary.""" args = {} - if 'refresh_url' in _dict: - args['refresh_url'] = _dict.get('refresh_url') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'refresh_url\' not present in Pagination JSON' + 'Required property \'response_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined JSON' + ) + if (user_defined := _dict.get('user_defined')) is not None: + args['user_defined'] = user_defined + else: + raise ValueError( + 'Required property \'user_defined\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined JSON' ) - if 'next_url' in _dict: - args['next_url'] = _dict.get('next_url') - if 'total' in _dict: - args['total'] = _dict.get('total') - if 'matched' in _dict: - args['matched'] = _dict.get('matched') - if 'refresh_cursor' in _dict: - args['refresh_cursor'] = _dict.get('refresh_cursor') - if 'next_cursor' in _dict: - args['next_cursor'] = _dict.get('next_cursor') + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'refresh_url') and self.refresh_url is not None: - _dict['refresh_url'] = self.refresh_url - if hasattr(self, 'next_url') and self.next_url is not None: - _dict['next_url'] = self.next_url - if hasattr(self, 'total') and 
self.total is not None: - _dict['total'] = self.total - if hasattr(self, 'matched') and self.matched is not None: - _dict['matched'] = self.matched - if hasattr(self, 'refresh_cursor') and self.refresh_cursor is not None: - _dict['refresh_cursor'] = self.refresh_cursor - if hasattr(self, 'next_cursor') and self.next_cursor is not None: - _dict['next_cursor'] = self.next_cursor + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'user_defined') and self.user_defined is not None: + _dict['user_defined'] = self.user_defined + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this Pagination object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, + other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, + other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class RuntimeEntity(object): +class DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo( + DialogNodeOutputGeneric): + """ + DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the video. + :param str title: (optional) An optional title to show before the response. + :param str description: (optional) An optional description to show with the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. + :param dict channel_options: (optional) For internal use only. + :param str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the video cannot be seen. """ - A term from the request that was identified as an entity. - - :attr str entity: An entity detected in the input. - :attr list[int] location: An array of zero-based character offsets that indicate where - the detected entity values begin and end in the input text. - :attr str value: The term in the input text that was recognized as an entity value. - :attr float confidence: (optional) A decimal percentage that represents Watson's - confidence in the entity. - :attr dict metadata: (optional) Any metadata for the entity. - :attr list[CaptureGroup] groups: (optional) The recognized capture groups for the - entity, as defined by the entity pattern. 
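The user_defined variant above simply carries an arbitrary dictionary (limited to 5000 bytes, per its docstring); a minimal sketch, with the import path assumed and an invented payload:

from ibm_watson.assistant_v1 import DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined

custom = DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined(
    response_type='user_defined',
    user_defined={'card': 'weather', 'temperature_unit': 'C'},  # any application-specific payload
)
print(custom.to_dict())
# {'response_type': 'user_defined', 'user_defined': {'card': 'weather', 'temperature_unit': 'C'}}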
- """ - - def __init__(self, - entity, - location, - value, - confidence=None, - metadata=None, - groups=None, - **kwargs): - """ - Initialize a RuntimeEntity object. - :param str entity: An entity detected in the input. - :param list[int] location: An array of zero-based character offsets that indicate - where the detected entity values begin and end in the input text. - :param str value: The term in the input text that was recognized as an entity - value. - :param float confidence: (optional) A decimal percentage that represents Watson's - confidence in the entity. - :param dict metadata: (optional) Any metadata for the entity. - :param list[CaptureGroup] groups: (optional) The recognized capture groups for the - entity, as defined by the entity pattern. - :param **kwargs: (optional) Any additional properties. - """ - self.entity = entity - self.location = location - self.value = value - self.confidence = confidence - self.metadata = metadata - self.groups = groups - for _key, _value in kwargs.items(): - setattr(self, _key, _value) + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + channel_options: Optional[dict] = None, + alt_text: Optional[str] = None, + ) -> None: + """ + Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the video. + :param str title: (optional) An optional title to show before the response. + :param str description: (optional) An optional description to show with the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + :param dict channel_options: (optional) For internal use only. + :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the video cannot be seen. 
+ """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.channels = channels + self.channel_options = channel_options + self.alt_text = alt_text @classmethod - def _from_dict(cls, _dict): - """Initialize a RuntimeEntity object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo': + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo object from a json dictionary.""" args = {} - xtra = _dict.copy() - if 'entity' in _dict: - args['entity'] = _dict.get('entity') - del xtra['entity'] + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'entity\' not present in RuntimeEntity JSON' + 'Required property \'response_type\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo JSON' ) - if 'location' in _dict: - args['location'] = _dict.get('location') - del xtra['location'] + if (source := _dict.get('source')) is not None: + args['source'] = source else: raise ValueError( - 'Required property \'location\' not present in RuntimeEntity JSON' + 'Required property \'source\' not present in DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo JSON' ) - if 'value' in _dict: - args['value'] = _dict.get('value') - del xtra['value'] - else: - raise ValueError( - 'Required property \'value\' not present in RuntimeEntity JSON') - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') - del xtra['confidence'] - if 'metadata' in _dict: - args['metadata'] = _dict.get('metadata') - del xtra['metadata'] - if 'groups' in _dict: - args['groups'] = [ - CaptureGroup._from_dict(x) for x in (_dict.get('groups')) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels ] - del xtra['groups'] - args.update(xtra) + if (channel_options := _dict.get('channel_options')) is not None: + args['channel_options'] = channel_options + if (alt_text := _dict.get('alt_text')) is not None: + args['alt_text'] = alt_text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'entity') and self.entity is not None: - _dict['entity'] = self.entity - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location - if hasattr(self, 'value') and self.value is not None: - _dict['value'] = self.value - if hasattr(self, 'confidence') and self.confidence is not None: - _dict['confidence'] = self.confidence - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata - if hasattr(self, 'groups') and self.groups is not None: - _dict['groups'] = [x._to_dict() for x in self.groups] - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value + if hasattr(self, 'response_type') and self.response_type is not None: + 
_dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list + if hasattr(self, + 'channel_options') and self.channel_options is not None: + _dict['channel_options'] = self.channel_options + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict - def __setattr__(self, name, value): - properties = { - 'entity', 'location', 'value', 'confidence', 'metadata', 'groups' - } - if not hasattr(self, '_additionalProperties'): - super(RuntimeEntity, self).__setattr__('_additionalProperties', - set()) - if name not in properties: - self._additionalProperties.add(name) - super(RuntimeEntity, self).__setattr__(name, value) - - def __str__(self): - """Return a `str` version of this RuntimeEntity object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class RuntimeIntent(object): +class RuntimeResponseGenericRuntimeResponseTypeAudio(RuntimeResponseGeneric): """ - An intent identified in the user input. - - :attr str intent: The name of the recognized intent. - :attr float confidence: A decimal percentage that represents Watson's confidence in - the intent. + RuntimeResponseGenericRuntimeResponseTypeAudio. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the audio clip. + :param str title: (optional) The title or introductory text to show before the + response. + :param str description: (optional) The description to show with the response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. + :param dict channel_options: (optional) For internal use only. + :param str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the audio player cannot be seen. """ - def __init__(self, intent, confidence, **kwargs): - """ - Initialize a RuntimeIntent object. - - :param str intent: The name of the recognized intent. 
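For the video variant above, the isinstance check in to_dict() serializes channel entries whether they are ResponseGenericChannel objects or plain dicts; a sketch with an assumed module path and an illustrative channel name:

from ibm_watson.assistant_v1 import DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo

video = DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo(
    response_type='video',
    source='https://example.com/clips/intro.mp4',
    alt_text='Short introduction video',
    channels=[{'channel': 'chat'}],  # plain dicts pass straight through to_dict()
)
print(video.to_dict()['channels'])   # [{'channel': 'chat'}]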
- :param float confidence: A decimal percentage that represents Watson's confidence - in the intent. - :param **kwargs: (optional) Any additional properties. - """ - self.intent = intent - self.confidence = confidence - for _key, _value in kwargs.items(): - setattr(self, _key, _value) + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + channel_options: Optional[dict] = None, + alt_text: Optional[str] = None, + ) -> None: + """ + Initialize a RuntimeResponseGenericRuntimeResponseTypeAudio object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the audio clip. + :param str title: (optional) The title or introductory text to show before + the response. + :param str description: (optional) The description to show with the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + :param dict channel_options: (optional) For internal use only. + :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the audio player cannot be seen. + """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.channels = channels + self.channel_options = channel_options + self.alt_text = alt_text @classmethod - def _from_dict(cls, _dict): - """Initialize a RuntimeIntent object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeAudio': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeAudio object from a json dictionary.""" args = {} - xtra = _dict.copy() - if 'intent' in _dict: - args['intent'] = _dict.get('intent') - del xtra['intent'] + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'intent\' not present in RuntimeIntent JSON' + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeAudio JSON' ) - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') - del xtra['confidence'] + if (source := _dict.get('source')) is not None: + args['source'] = source else: raise ValueError( - 'Required property \'confidence\' not present in RuntimeIntent JSON' + 'Required property \'source\' not present in RuntimeResponseGenericRuntimeResponseTypeAudio JSON' ) - args.update(xtra) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] + if (channel_options := _dict.get('channel_options')) is not None: + args['channel_options'] = channel_options + if (alt_text := _dict.get('alt_text')) is not None: + args['alt_text'] = alt_text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeAudio object 
from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'intent') and self.intent is not None: - _dict['intent'] = self.intent - if hasattr(self, 'confidence') and self.confidence is not None: - _dict['confidence'] = self.confidence - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list + if hasattr(self, + 'channel_options') and self.channel_options is not None: + _dict['channel_options'] = self.channel_options + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict - def __setattr__(self, name, value): - properties = {'intent', 'confidence'} - if not hasattr(self, '_additionalProperties'): - super(RuntimeIntent, self).__setattr__('_additionalProperties', - set()) - if name not in properties: - self._additionalProperties.add(name) - super(RuntimeIntent, self).__setattr__(name, value) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __str__(self): - """Return a `str` version of this RuntimeIntent object.""" - return json.dumps(self._to_dict(), indent=2) + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeAudio object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeAudio') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeAudio') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Synonym(object): +class RuntimeResponseGenericRuntimeResponseTypeChannelTransfer( + RuntimeResponseGeneric): """ - Synonym. - - :attr str synonym: The text of the synonym. This string must conform to the following - restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :attr datetime created: (optional) The timestamp for creation of the object. - :attr datetime updated: (optional) The timestamp for the most recent update to the - object. + RuntimeResponseGenericRuntimeResponseTypeChannelTransfer. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + **Note:** The `channel_transfer` response type is not supported on IBM Cloud + Pak for Data. 
+ :param str message_to_user: The message to display to the user when initiating a + channel transfer. + :param ChannelTransferInfo transfer_info: Information used by an integration to + transfer the conversation to a different channel. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended only for a built-in integration and should not + be handled by an API client. """ - def __init__(self, synonym, created=None, updated=None): - """ - Initialize a Synonym object. - - :param str synonym: The text of the synonym. This string must conform to the - following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :param datetime created: (optional) The timestamp for creation of the object. - :param datetime updated: (optional) The timestamp for the most recent update to - the object. - """ - self.synonym = synonym - self.created = created - self.updated = updated + def __init__( + self, + response_type: str, + message_to_user: str, + transfer_info: 'ChannelTransferInfo', + *, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a RuntimeResponseGenericRuntimeResponseTypeChannelTransfer object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + **Note:** The `channel_transfer` response type is not supported on IBM + Cloud Pak for Data. + :param str message_to_user: The message to display to the user when + initiating a channel transfer. + :param ChannelTransferInfo transfer_info: Information used by an + integration to transfer the conversation to a different channel. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended only for a built-in + integration and should not be handled by an API client. 
+ """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.message_to_user = message_to_user + self.transfer_info = transfer_info + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a Synonym object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeChannelTransfer object from a json dictionary.""" args = {} - if 'synonym' in _dict: - args['synonym'] = _dict.get('synonym') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'synonym\' not present in Synonym JSON') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeChannelTransfer JSON' + ) + if (message_to_user := _dict.get('message_to_user')) is not None: + args['message_to_user'] = message_to_user + else: + raise ValueError( + 'Required property \'message_to_user\' not present in RuntimeResponseGenericRuntimeResponseTypeChannelTransfer JSON' + ) + if (transfer_info := _dict.get('transfer_info')) is not None: + args['transfer_info'] = ChannelTransferInfo.from_dict(transfer_info) + else: + raise ValueError( + 'Required property \'transfer_info\' not present in RuntimeResponseGenericRuntimeResponseTypeChannelTransfer JSON' + ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeChannelTransfer object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'synonym') and self.synonym is not None: - _dict['synonym'] = self.synonym - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, + 'message_to_user') and self.message_to_user is not None: + _dict['message_to_user'] = self.message_to_user + if hasattr(self, 'transfer_info') and self.transfer_info is not None: + if isinstance(self.transfer_info, dict): + _dict['transfer_info'] = self.transfer_info + else: + _dict['transfer_info'] = self.transfer_info.to_dict() + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this Synonym object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeChannelTransfer object.""" + return json.dumps(self.to_dict(), indent=2) + + 
def __eq__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SynonymCollection(object): +class RuntimeResponseGenericRuntimeResponseTypeConnectToAgent( + RuntimeResponseGeneric): """ - SynonymCollection. - - :attr list[Synonym] synonyms: An array of synonyms. - :attr Pagination pagination: The pagination data for the returned objects. + RuntimeResponseGenericRuntimeResponseTypeConnectToAgent. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str message_to_human_agent: (optional) A message to be sent to the human + agent who will be taking over the conversation. + :param AgentAvailabilityMessage agent_available: (optional) An optional message + to be displayed to the user to indicate that the conversation will be + transferred to the next available agent. + :param AgentAvailabilityMessage agent_unavailable: (optional) An optional + message to be displayed to the user to indicate that no online agent is + available to take over the conversation. + :param DialogNodeOutputConnectToAgentTransferInfo transfer_info: (optional) + Routing or other contextual information to be used by target service desk + systems. + :param str topic: (optional) A label identifying the topic of the conversation, + derived from the **title** property of the relevant node or the **topic** + property of the dialog node response. + :param str dialog_node: (optional) The unique ID of the dialog node that the + **topic** property is taken from. The **topic** property is populated using the + value of the dialog node's **title** property. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, synonyms, pagination): - """ - Initialize a SynonymCollection object. - - :param list[Synonym] synonyms: An array of synonyms. - :param Pagination pagination: The pagination data for the returned objects. - """ - self.synonyms = synonyms - self.pagination = pagination + def __init__( + self, + response_type: str, + *, + message_to_human_agent: Optional[str] = None, + agent_available: Optional['AgentAvailabilityMessage'] = None, + agent_unavailable: Optional['AgentAvailabilityMessage'] = None, + transfer_info: Optional[ + 'DialogNodeOutputConnectToAgentTransferInfo'] = None, + topic: Optional[str] = None, + dialog_node: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a RuntimeResponseGenericRuntimeResponseTypeConnectToAgent object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str message_to_human_agent: (optional) A message to be sent to the + human agent who will be taking over the conversation. 
+ :param AgentAvailabilityMessage agent_available: (optional) An optional + message to be displayed to the user to indicate that the conversation will + be transferred to the next available agent. + :param AgentAvailabilityMessage agent_unavailable: (optional) An optional + message to be displayed to the user to indicate that no online agent is + available to take over the conversation. + :param DialogNodeOutputConnectToAgentTransferInfo transfer_info: (optional) + Routing or other contextual information to be used by target service desk + systems. + :param str topic: (optional) A label identifying the topic of the + conversation, derived from the **title** property of the relevant node or + the **topic** property of the dialog node response. + :param str dialog_node: (optional) The unique ID of the dialog node that + the **topic** property is taken from. The **topic** property is populated + using the value of the dialog node's **title** property. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.message_to_human_agent = message_to_human_agent + self.agent_available = agent_available + self.agent_unavailable = agent_unavailable + self.transfer_info = transfer_info + self.topic = topic + self.dialog_node = dialog_node + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a SynonymCollection object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeConnectToAgent object from a json dictionary.""" args = {} - if 'synonyms' in _dict: - args['synonyms'] = [ - Synonym._from_dict(x) for x in (_dict.get('synonyms')) - ] - else: - raise ValueError( - 'Required property \'synonyms\' not present in SynonymCollection JSON' - ) - if 'pagination' in _dict: - args['pagination'] = Pagination._from_dict(_dict.get('pagination')) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'pagination\' not present in SynonymCollection JSON' + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeConnectToAgent JSON' ) + if (message_to_human_agent := + _dict.get('message_to_human_agent')) is not None: + args['message_to_human_agent'] = message_to_human_agent + if (agent_available := _dict.get('agent_available')) is not None: + args['agent_available'] = AgentAvailabilityMessage.from_dict( + agent_available) + if (agent_unavailable := _dict.get('agent_unavailable')) is not None: + args['agent_unavailable'] = AgentAvailabilityMessage.from_dict( + agent_unavailable) + if (transfer_info := _dict.get('transfer_info')) is not None: + args[ + 'transfer_info'] = DialogNodeOutputConnectToAgentTransferInfo.from_dict( + transfer_info) + if (topic := _dict.get('topic')) is not None: + args['topic'] = topic + if (dialog_node := _dict.get('dialog_node')) is not None: + args['dialog_node'] = dialog_node + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + 
"""Initialize a RuntimeResponseGenericRuntimeResponseTypeConnectToAgent object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'synonyms') and self.synonyms is not None: - _dict['synonyms'] = [x._to_dict() for x in self.synonyms] - if hasattr(self, 'pagination') and self.pagination is not None: - _dict['pagination'] = self.pagination._to_dict() + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'message_to_human_agent' + ) and self.message_to_human_agent is not None: + _dict['message_to_human_agent'] = self.message_to_human_agent + if hasattr(self, + 'agent_available') and self.agent_available is not None: + if isinstance(self.agent_available, dict): + _dict['agent_available'] = self.agent_available + else: + _dict['agent_available'] = self.agent_available.to_dict() + if hasattr(self, + 'agent_unavailable') and self.agent_unavailable is not None: + if isinstance(self.agent_unavailable, dict): + _dict['agent_unavailable'] = self.agent_unavailable + else: + _dict['agent_unavailable'] = self.agent_unavailable.to_dict() + if hasattr(self, 'transfer_info') and self.transfer_info is not None: + if isinstance(self.transfer_info, dict): + _dict['transfer_info'] = self.transfer_info + else: + _dict['transfer_info'] = self.transfer_info.to_dict() + if hasattr(self, 'topic') and self.topic is not None: + _dict['topic'] = self.topic + if hasattr(self, 'dialog_node') and self.dialog_node is not None: + _dict['dialog_node'] = self.dialog_node + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this SynonymCollection object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeConnectToAgent object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SystemResponse(object): +class RuntimeResponseGenericRuntimeResponseTypeIframe(RuntimeResponseGeneric): """ - For internal use only. - + RuntimeResponseGenericRuntimeResponseTypeIframe. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the embeddable content. + :param str title: (optional) The title or introductory text to show before the + response. + :param str description: (optional) The description to show with the response. + :param str image_url: (optional) The URL of an image that shows a preview of the + embedded content. 
+ :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, **kwargs): - """ - Initialize a SystemResponse object. - - :param **kwargs: (optional) Any additional properties. - """ - for _key, _value in kwargs.items(): - setattr(self, _key, _value) + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + image_url: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a RuntimeResponseGenericRuntimeResponseTypeIframe object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the embeddable content. + :param str title: (optional) The title or introductory text to show before + the response. + :param str description: (optional) The description to show with the + response. + :param str image_url: (optional) The URL of an image that shows a preview + of the embedded content. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.image_url = image_url + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a SystemResponse object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeIframe': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeIframe object from a json dictionary.""" args = {} - xtra = _dict.copy() - args.update(xtra) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeIframe JSON' + ) + if (source := _dict.get('source')) is not None: + args['source'] = source + else: + raise ValueError( + 'Required property \'source\' not present in RuntimeResponseGenericRuntimeResponseTypeIframe JSON' + ) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (image_url := _dict.get('image_url')) is not None: + args['image_url'] = image_url + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeIframe object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value + if hasattr(self, 'response_type') and self.response_type is not None: + 
_dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'image_url') and self.image_url is not None: + _dict['image_url'] = self.image_url + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __setattr__(self, name, value): - properties = {} - if not hasattr(self, '_additionalProperties'): - super(SystemResponse, self).__setattr__('_additionalProperties', - set()) - if name not in properties: - self._additionalProperties.add(name) - super(SystemResponse, self).__setattr__(name, value) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __str__(self): - """Return a `str` version of this SystemResponse object.""" - return json.dumps(self._to_dict(), indent=2) + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeIframe object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeIframe') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeIframe') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Value(object): +class RuntimeResponseGenericRuntimeResponseTypeImage(RuntimeResponseGeneric): + """ + RuntimeResponseGenericRuntimeResponseTypeImage. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the image. + :param str title: (optional) The title or introductory text to show before the + response. + :param str description: (optional) The description to show with the response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. + :param str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the image cannot be seen. """ - Value. - - :attr str value: The text of the entity value. This string must conform to the - following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :attr dict metadata: (optional) Any metadata related to the entity value. - :attr str value_type: Specifies the type of entity value. - :attr list[str] synonyms: (optional) An array of synonyms for the entity value. A - value can specify either synonyms or patterns (depending on the value type), but not - both. 
A synonym must conform to the following resrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :attr list[str] patterns: (optional) An array of patterns for the entity value. A - value can specify either synonyms or patterns (depending on the value type), but not - both. A pattern is a regular expression no longer than 512 characters. For more - information about how to specify a pattern, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/entities.html#entities-create-dictionary-based). - :attr datetime created: (optional) The timestamp for creation of the object. - :attr datetime updated: (optional) The timestamp for the most recent update to the - object. - """ - - def __init__(self, - value, - value_type, - metadata=None, - synonyms=None, - patterns=None, - created=None, - updated=None): - """ - Initialize a Value object. - :param str value: The text of the entity value. This string must conform to the - following restrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :param str value_type: Specifies the type of entity value. - :param dict metadata: (optional) Any metadata related to the entity value. - :param list[str] synonyms: (optional) An array of synonyms for the entity value. A - value can specify either synonyms or patterns (depending on the value type), but - not both. A synonym must conform to the following resrictions: - - It cannot contain carriage return, newline, or tab characters. - - It cannot consist of only whitespace characters. - - It must be no longer than 64 characters. - :param list[str] patterns: (optional) An array of patterns for the entity value. A - value can specify either synonyms or patterns (depending on the value type), but - not both. A pattern is a regular expression no longer than 512 characters. For - more information about how to specify a pattern, see the - [documentation](https://cloud.ibm.com/docs/services/assistant/entities.html#entities-create-dictionary-based). - :param datetime created: (optional) The timestamp for creation of the object. - :param datetime updated: (optional) The timestamp for the most recent update to - the object. - """ - self.value = value - self.metadata = metadata - self.value_type = value_type - self.synonyms = synonyms - self.patterns = patterns - self.created = created - self.updated = updated + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + alt_text: Optional[str] = None, + ) -> None: + """ + Initialize a RuntimeResponseGenericRuntimeResponseTypeImage object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the image. + :param str title: (optional) The title or introductory text to show before + the response. + :param str description: (optional) The description to show with the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. 
If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the image cannot be seen. + """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.channels = channels + self.alt_text = alt_text @classmethod - def _from_dict(cls, _dict): - """Initialize a Value object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeImage': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeImage object from a json dictionary.""" args = {} - if 'value' in _dict: - args['value'] = _dict.get('value') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'value\' not present in Value JSON') - if 'metadata' in _dict: - args['metadata'] = _dict.get('metadata') - if 'type' in _dict or 'value_type' in _dict: - args['value_type'] = _dict.get('type') or _dict.get('value_type') + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeImage JSON' + ) + if (source := _dict.get('source')) is not None: + args['source'] = source else: raise ValueError( - 'Required property \'type\' not present in Value JSON') - if 'synonyms' in _dict: - args['synonyms'] = _dict.get('synonyms') - if 'patterns' in _dict: - args['patterns'] = _dict.get('patterns') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) + 'Required property \'source\' not present in RuntimeResponseGenericRuntimeResponseTypeImage JSON' + ) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] + if (alt_text := _dict.get('alt_text')) is not None: + args['alt_text'] = alt_text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeImage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'value') and self.value is not None: - _dict['value'] = self.value - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata - if hasattr(self, 'value_type') and self.value_type is not None: - _dict['type'] = self.value_type - if hasattr(self, 'synonyms') and self.synonyms is not None: - _dict['synonyms'] = self.synonyms - if hasattr(self, 'patterns') and self.patterns is not None: - _dict['patterns'] = self.patterns - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'title') and 
self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict - def __str__(self): - """Return a `str` version of this Value object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeImage object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeImage') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeImage') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class ValueCollection(object): +class RuntimeResponseGenericRuntimeResponseTypeOption(RuntimeResponseGeneric): """ - ValueCollection. - - :attr list[Value] values: An array of entity values. - :attr Pagination pagination: The pagination data for the returned objects. + RuntimeResponseGenericRuntimeResponseTypeOption. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str title: The title or introductory text to show before the response. + :param str description: (optional) The description to show with the response. + :param str preference: (optional) The preferred type of control to display. + :param List[DialogNodeOutputOptionsElement] options: An array of objects + describing the options from which the user can choose. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, values, pagination): - """ - Initialize a ValueCollection object. - - :param list[Value] values: An array of entity values. - :param Pagination pagination: The pagination data for the returned objects. + def __init__( + self, + response_type: str, + title: str, + options: List['DialogNodeOutputOptionsElement'], + *, + description: Optional[str] = None, + preference: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a RuntimeResponseGenericRuntimeResponseTypeOption object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str title: The title or introductory text to show before the + response. + :param List[DialogNodeOutputOptionsElement] options: An array of objects + describing the options from which the user can choose. + :param str description: (optional) The description to show with the + response. 
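# Illustrative sketch for the image model defined above; the URL and channel
# values are assumptions. Note that to_dict() accepts either
# ResponseGenericChannel instances or plain dicts in channels, because of the
# isinstance(v, dict) branch shown above.
image = RuntimeResponseGenericRuntimeResponseTypeImage(
    response_type='image',
    source='https://example.com/cat.png',
    alt_text='A cat sitting on a keyboard',
    channels=[{'channel': 'chat'}],  # plain dict, passed through unchanged
)
print(image.to_dict()['channels'])   # [{'channel': 'chat'}]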
+ :param str preference: (optional) The preferred type of control to display. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. """ - self.values = values - self.pagination = pagination + # pylint: disable=super-init-not-called + self.response_type = response_type + self.title = title + self.description = description + self.preference = preference + self.options = options + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a ValueCollection object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeOption': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeOption object from a json dictionary.""" args = {} - if 'values' in _dict: - args['values'] = [ - Value._from_dict(x) for x in (_dict.get('values')) - ] + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'values\' not present in ValueCollection JSON' + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeOption JSON' ) - if 'pagination' in _dict: - args['pagination'] = Pagination._from_dict(_dict.get('pagination')) + if (title := _dict.get('title')) is not None: + args['title'] = title else: raise ValueError( - 'Required property \'pagination\' not present in ValueCollection JSON' + 'Required property \'title\' not present in RuntimeResponseGenericRuntimeResponseTypeOption JSON' + ) + if (description := _dict.get('description')) is not None: + args['description'] = description + if (preference := _dict.get('preference')) is not None: + args['preference'] = preference + if (options := _dict.get('options')) is not None: + args['options'] = [ + DialogNodeOutputOptionsElement.from_dict(v) for v in options + ] + else: + raise ValueError( + 'Required property \'options\' not present in RuntimeResponseGenericRuntimeResponseTypeOption JSON' ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeOption object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'values') and self.values is not None: - _dict['values'] = [x._to_dict() for x in self.values] - if hasattr(self, 'pagination') and self.pagination is not None: - _dict['pagination'] = self.pagination._to_dict() + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'preference') and self.preference is not None: + _dict['preference'] = self.preference + if hasattr(self, 'options') and self.options is not None: + options_list = [] + for v in self.options: + if isinstance(v, dict): + options_list.append(v) + else: + options_list.append(v.to_dict()) + _dict['options'] = options_list + if hasattr(self, 'channels') and 
self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this ValueCollection object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeOption object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeOption') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeOption') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class PreferenceEnum(str, Enum): + """ + The preferred type of control to display. + """ -class Workspace(object): - """ - Workspace. + DROPDOWN = 'dropdown' + BUTTON = 'button' - :attr str name: The name of the workspace. This string cannot contain carriage return, - newline, or tab characters, and it must be no longer than 64 characters. - :attr str description: (optional) The description of the workspace. This string cannot - contain carriage return, newline, or tab characters, and it must be no longer than 128 - characters. - :attr str language: The language of the workspace. - :attr dict metadata: (optional) Any metadata related to the workspace. - :attr bool learning_opt_out: Whether training data from the workspace (including - artifacts such as intents and entities) can be used by IBM for general service - improvements. `true` indicates that workspace training data is not to be used. - :attr WorkspaceSystemSettings system_settings: (optional) Global settings for the - workspace. - :attr str workspace_id: The workspace ID of the workspace. - :attr str status: (optional) The current status of the workspace. - :attr datetime created: (optional) The timestamp for creation of the object. - :attr datetime updated: (optional) The timestamp for the most recent update to the - object. - :attr list[Intent] intents: (optional) An array of intents. - :attr list[Entity] entities: (optional) An array of objects describing the entities - for the workspace. - :attr list[DialogNode] dialog_nodes: (optional) An array of objects describing the - dialog nodes in the workspace. - :attr list[Counterexample] counterexamples: (optional) An array of counterexamples. - """ - - def __init__(self, - name, - language, - learning_opt_out, - workspace_id, - description=None, - metadata=None, - system_settings=None, - status=None, - created=None, - updated=None, - intents=None, - entities=None, - dialog_nodes=None, - counterexamples=None): - """ - Initialize a Workspace object. - :param str name: The name of the workspace. This string cannot contain carriage - return, newline, or tab characters, and it must be no longer than 64 characters. - :param str language: The language of the workspace. - :param bool learning_opt_out: Whether training data from the workspace (including - artifacts such as intents and entities) can be used by IBM for general service - improvements. 
`true` indicates that workspace training data is not to be used. - :param str workspace_id: The workspace ID of the workspace. - :param str description: (optional) The description of the workspace. This string - cannot contain carriage return, newline, or tab characters, and it must be no - longer than 128 characters. - :param dict metadata: (optional) Any metadata related to the workspace. - :param WorkspaceSystemSettings system_settings: (optional) Global settings for the - workspace. - :param str status: (optional) The current status of the workspace. - :param datetime created: (optional) The timestamp for creation of the object. - :param datetime updated: (optional) The timestamp for the most recent update to - the object. - :param list[Intent] intents: (optional) An array of intents. - :param list[Entity] entities: (optional) An array of objects describing the - entities for the workspace. - :param list[DialogNode] dialog_nodes: (optional) An array of objects describing - the dialog nodes in the workspace. - :param list[Counterexample] counterexamples: (optional) An array of - counterexamples. - """ - self.name = name - self.description = description - self.language = language - self.metadata = metadata - self.learning_opt_out = learning_opt_out - self.system_settings = system_settings - self.workspace_id = workspace_id - self.status = status - self.created = created - self.updated = updated - self.intents = intents - self.entities = entities - self.dialog_nodes = dialog_nodes - self.counterexamples = counterexamples +class RuntimeResponseGenericRuntimeResponseTypePause(RuntimeResponseGeneric): + """ + RuntimeResponseGenericRuntimeResponseTypePause. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param int time: How long to pause, in milliseconds. + :param bool typing: (optional) Whether to send a "user is typing" event during + the pause. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. + """ + + def __init__( + self, + response_type: str, + time: int, + *, + typing: Optional[bool] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a RuntimeResponseGenericRuntimeResponseTypePause object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param int time: How long to pause, in milliseconds. + :param bool typing: (optional) Whether to send a "user is typing" event + during the pause. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. 
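# Illustrative sketch for the pause model initialized above: time is given in
# milliseconds and typing toggles the "user is typing" indicator. The values
# are assumptions for illustration.
pause = RuntimeResponseGenericRuntimeResponseTypePause(
    response_type='pause',
    time=1500,
    typing=True,
)
print(pause.to_dict())  # {'response_type': 'pause', 'time': 1500, 'typing': True}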
+ """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.time = time + self.typing = typing + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a Workspace object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypePause': + """Initialize a RuntimeResponseGenericRuntimeResponseTypePause object from a json dictionary.""" args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in Workspace JSON') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'language' in _dict: - args['language'] = _dict.get('language') - else: - raise ValueError( - 'Required property \'language\' not present in Workspace JSON') - if 'metadata' in _dict: - args['metadata'] = _dict.get('metadata') - if 'learning_opt_out' in _dict: - args['learning_opt_out'] = _dict.get('learning_opt_out') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'learning_opt_out\' not present in Workspace JSON' + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypePause JSON' ) - if 'system_settings' in _dict: - args['system_settings'] = WorkspaceSystemSettings._from_dict( - _dict.get('system_settings')) - if 'workspace_id' in _dict: - args['workspace_id'] = _dict.get('workspace_id') + if (time := _dict.get('time')) is not None: + args['time'] = time else: raise ValueError( - 'Required property \'workspace_id\' not present in Workspace JSON' + 'Required property \'time\' not present in RuntimeResponseGenericRuntimeResponseTypePause JSON' ) - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) - if 'intents' in _dict: - args['intents'] = [ - Intent._from_dict(x) for x in (_dict.get('intents')) - ] - if 'entities' in _dict: - args['entities'] = [ - Entity._from_dict(x) for x in (_dict.get('entities')) - ] - if 'dialog_nodes' in _dict: - args['dialog_nodes'] = [ - DialogNode._from_dict(x) for x in (_dict.get('dialog_nodes')) - ] - if 'counterexamples' in _dict: - args['counterexamples'] = [ - Counterexample._from_dict(x) - for x in (_dict.get('counterexamples')) + if (typing := _dict.get('typing')) is not None: + args['typing'] = typing + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypePause object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata - if hasattr(self, - 'learning_opt_out') and self.learning_opt_out is not None: - _dict['learning_opt_out'] = self.learning_opt_out - if 
hasattr(self, - 'system_settings') and self.system_settings is not None: - _dict['system_settings'] = self.system_settings._to_dict() - if hasattr(self, 'workspace_id') and self.workspace_id is not None: - _dict['workspace_id'] = self.workspace_id - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) - if hasattr(self, 'intents') and self.intents is not None: - _dict['intents'] = [x._to_dict() for x in self.intents] - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] - if hasattr(self, 'dialog_nodes') and self.dialog_nodes is not None: - _dict['dialog_nodes'] = [x._to_dict() for x in self.dialog_nodes] - if hasattr(self, - 'counterexamples') and self.counterexamples is not None: - _dict['counterexamples'] = [ - x._to_dict() for x in self.counterexamples - ] + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'time') and self.time is not None: + _dict['time'] = self.time + if hasattr(self, 'typing') and self.typing is not None: + _dict['typing'] = self.typing + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this Workspace object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypePause object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypePause') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypePause') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class WorkspaceCollection(object): +class RuntimeResponseGenericRuntimeResponseTypeSuggestion( + RuntimeResponseGeneric): """ - WorkspaceCollection. - - :attr list[Workspace] workspaces: An array of objects describing the workspaces - associated with the service instance. - :attr Pagination pagination: The pagination data for the returned objects. + RuntimeResponseGenericRuntimeResponseTypeSuggestion. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str title: The title or introductory text to show before the response. + :param List[DialogSuggestion] suggestions: An array of objects describing the + possible matching dialog nodes from which the user can choose. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. 
If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, workspaces, pagination): - """ - Initialize a WorkspaceCollection object. - - :param list[Workspace] workspaces: An array of objects describing the workspaces - associated with the service instance. - :param Pagination pagination: The pagination data for the returned objects. - """ - self.workspaces = workspaces - self.pagination = pagination + def __init__( + self, + response_type: str, + title: str, + suggestions: List['DialogSuggestion'], + *, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a RuntimeResponseGenericRuntimeResponseTypeSuggestion object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str title: The title or introductory text to show before the + response. + :param List[DialogSuggestion] suggestions: An array of objects describing + the possible matching dialog nodes from which the user can choose. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.title = title + self.suggestions = suggestions + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a WorkspaceCollection object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'RuntimeResponseGenericRuntimeResponseTypeSuggestion': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeSuggestion object from a json dictionary.""" args = {} - if 'workspaces' in _dict: - args['workspaces'] = [ - Workspace._from_dict(x) for x in (_dict.get('workspaces')) - ] + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'workspaces\' not present in WorkspaceCollection JSON' + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeSuggestion JSON' ) - if 'pagination' in _dict: - args['pagination'] = Pagination._from_dict(_dict.get('pagination')) + if (title := _dict.get('title')) is not None: + args['title'] = title else: raise ValueError( - 'Required property \'pagination\' not present in WorkspaceCollection JSON' + 'Required property \'title\' not present in RuntimeResponseGenericRuntimeResponseTypeSuggestion JSON' + ) + if (suggestions := _dict.get('suggestions')) is not None: + args['suggestions'] = [ + DialogSuggestion.from_dict(v) for v in suggestions + ] + else: + raise ValueError( + 'Required property \'suggestions\' not present in RuntimeResponseGenericRuntimeResponseTypeSuggestion JSON' ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeSuggestion object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'workspaces') and self.workspaces is not None: - _dict['workspaces'] 
= [x._to_dict() for x in self.workspaces] - if hasattr(self, 'pagination') and self.pagination is not None: - _dict['pagination'] = self.pagination._to_dict() + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'suggestions') and self.suggestions is not None: + suggestions_list = [] + for v in self.suggestions: + if isinstance(v, dict): + suggestions_list.append(v) + else: + suggestions_list.append(v.to_dict()) + _dict['suggestions'] = suggestions_list + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this WorkspaceCollection object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeSuggestion object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeSuggestion' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeSuggestion' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class WorkspaceSystemSettings(object): +class RuntimeResponseGenericRuntimeResponseTypeText(RuntimeResponseGeneric): """ - Global settings for the workspace. - - :attr WorkspaceSystemSettingsTooling tooling: (optional) Workspace settings related to - the Watson Assistant tool. - :attr WorkspaceSystemSettingsDisambiguation disambiguation: (optional) Workspace - settings related to the disambiguation feature. - **Note:** This feature is available only to Premium users. - :attr dict human_agent_assist: (optional) For internal use only. + RuntimeResponseGenericRuntimeResponseTypeText. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str text: The text of the response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, - tooling=None, - disambiguation=None, - human_agent_assist=None): - """ - Initialize a WorkspaceSystemSettings object. - - :param WorkspaceSystemSettingsTooling tooling: (optional) Workspace settings - related to the Watson Assistant tool. - :param WorkspaceSystemSettingsDisambiguation disambiguation: (optional) Workspace - settings related to the disambiguation feature. - **Note:** This feature is available only to Premium users. - :param dict human_agent_assist: (optional) For internal use only. 
- """ - self.tooling = tooling - self.disambiguation = disambiguation - self.human_agent_assist = human_agent_assist + def __init__( + self, + response_type: str, + text: str, + *, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a RuntimeResponseGenericRuntimeResponseTypeText object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str text: The text of the response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.text = text + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a WorkspaceSystemSettings object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeText': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeText object from a json dictionary.""" args = {} - if 'tooling' in _dict: - args['tooling'] = WorkspaceSystemSettingsTooling._from_dict( - _dict.get('tooling')) - if 'disambiguation' in _dict: - args[ - 'disambiguation'] = WorkspaceSystemSettingsDisambiguation._from_dict( - _dict.get('disambiguation')) - if 'human_agent_assist' in _dict: - args['human_agent_assist'] = _dict.get('human_agent_assist') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeText JSON' + ) + if (text := _dict.get('text')) is not None: + args['text'] = text + else: + raise ValueError( + 'Required property \'text\' not present in RuntimeResponseGenericRuntimeResponseTypeText JSON' + ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeText object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'tooling') and self.tooling is not None: - _dict['tooling'] = self.tooling._to_dict() - if hasattr(self, 'disambiguation') and self.disambiguation is not None: - _dict['disambiguation'] = self.disambiguation._to_dict() - if hasattr( - self, - 'human_agent_assist') and self.human_agent_assist is not None: - _dict['human_agent_assist'] = self.human_agent_assist + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this WorkspaceSystemSettings object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + 
return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeText object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeText') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeText') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class WorkspaceSystemSettingsDisambiguation(object): +class RuntimeResponseGenericRuntimeResponseTypeUserDefined( + RuntimeResponseGeneric): + """ + RuntimeResponseGenericRuntimeResponseTypeUserDefined. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param dict user_defined: An object containing any properties for the + user-defined response type. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - Workspace settings related to the disambiguation feature. - **Note:** This feature is available only to Premium users. - - :attr str prompt: (optional) The text of the introductory prompt that accompanies - disambiguation options presented to the user. - :attr str none_of_the_above_prompt: (optional) The user-facing label for the option - users can select if none of the suggested options is correct. If no value is specified - for this property, this option does not appear. - :attr bool enabled: (optional) Whether the disambiguation feature is enabled for the - workspace. - :attr str sensitivity: (optional) The sensitivity of the disambiguation feature to - intent detection conflicts. Set to **high** if you want the disambiguation feature to - be triggered more often. This can be useful for testing or demonstration purposes. - """ - - def __init__(self, - prompt=None, - none_of_the_above_prompt=None, - enabled=None, - sensitivity=None): - """ - Initialize a WorkspaceSystemSettingsDisambiguation object. - :param str prompt: (optional) The text of the introductory prompt that accompanies - disambiguation options presented to the user. - :param str none_of_the_above_prompt: (optional) The user-facing label for the - option users can select if none of the suggested options is correct. If no value - is specified for this property, this option does not appear. - :param bool enabled: (optional) Whether the disambiguation feature is enabled for - the workspace. - :param str sensitivity: (optional) The sensitivity of the disambiguation feature - to intent detection conflicts. Set to **high** if you want the disambiguation - feature to be triggered more often. This can be useful for testing or - demonstration purposes. - """ - self.prompt = prompt - self.none_of_the_above_prompt = none_of_the_above_prompt - self.enabled = enabled - self.sensitivity = sensitivity + def __init__( + self, + response_type: str, + user_defined: dict, + *, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: + """ + Initialize a RuntimeResponseGenericRuntimeResponseTypeUserDefined object. 
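# Illustrative round-trip sketch for the text model defined above: both
# required properties must be present in the JSON, otherwise from_dict raises
# ValueError. The message text is an assumption for illustration.
text_json = {'response_type': 'text', 'text': 'Hello! How can I help you?'}
text = RuntimeResponseGenericRuntimeResponseTypeText.from_dict(text_json)
assert text.to_dict() == text_json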
+ + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param dict user_defined: An object containing any properties for the + user-defined response type. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.user_defined = user_defined + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a WorkspaceSystemSettingsDisambiguation object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'RuntimeResponseGenericRuntimeResponseTypeUserDefined': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeUserDefined object from a json dictionary.""" args = {} - if 'prompt' in _dict: - args['prompt'] = _dict.get('prompt') - if 'none_of_the_above_prompt' in _dict: - args['none_of_the_above_prompt'] = _dict.get( - 'none_of_the_above_prompt') - if 'enabled' in _dict: - args['enabled'] = _dict.get('enabled') - if 'sensitivity' in _dict: - args['sensitivity'] = _dict.get('sensitivity') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeUserDefined JSON' + ) + if (user_defined := _dict.get('user_defined')) is not None: + args['user_defined'] = user_defined + else: + raise ValueError( + 'Required property \'user_defined\' not present in RuntimeResponseGenericRuntimeResponseTypeUserDefined JSON' + ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeUserDefined object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'prompt') and self.prompt is not None: - _dict['prompt'] = self.prompt - if hasattr(self, 'none_of_the_above_prompt' - ) and self.none_of_the_above_prompt is not None: - _dict['none_of_the_above_prompt'] = self.none_of_the_above_prompt - if hasattr(self, 'enabled') and self.enabled is not None: - _dict['enabled'] = self.enabled - if hasattr(self, 'sensitivity') and self.sensitivity is not None: - _dict['sensitivity'] = self.sensitivity + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'user_defined') and self.user_defined is not None: + _dict['user_defined'] = self.user_defined + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this WorkspaceSystemSettingsDisambiguation object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a 
`str` version of this RuntimeResponseGenericRuntimeResponseTypeUserDefined object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeUserDefined' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeUserDefined' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class WorkspaceSystemSettingsTooling(object): +class RuntimeResponseGenericRuntimeResponseTypeVideo(RuntimeResponseGeneric): """ - Workspace settings related to the Watson Assistant tool. - - :attr bool store_generic_responses: (optional) Whether the dialog JSON editor displays - text responses within the `output.generic` object. + RuntimeResponseGenericRuntimeResponseTypeVideo. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the video. + :param str title: (optional) The title or introductory text to show before the + response. + :param str description: (optional) The description to show with the response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. + :param dict channel_options: (optional) For internal use only. + :param str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the video cannot be seen. """ - def __init__(self, store_generic_responses=None): - """ - Initialize a WorkspaceSystemSettingsTooling object. - - :param bool store_generic_responses: (optional) Whether the dialog JSON editor - displays text responses within the `output.generic` object. - """ - self.store_generic_responses = store_generic_responses + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + channel_options: Optional[dict] = None, + alt_text: Optional[str] = None, + ) -> None: + """ + Initialize a RuntimeResponseGenericRuntimeResponseTypeVideo object. + + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the video. + :param str title: (optional) The title or introductory text to show before + the response. + :param str description: (optional) The description to show with the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + :param dict channel_options: (optional) For internal use only. + :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the video cannot be seen. 
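The response-type model classes introduced above replace the old private `_from_dict`/`_to_dict` helpers with public `from_dict`/`to_dict` methods that validate required properties. A minimal round-trip sketch, assuming these classes are importable from `ibm_watson.assistant_v1` as the surrounding hunks suggest; the payload values are illustrative only:

    from ibm_watson.assistant_v1 import (
        RuntimeResponseGenericRuntimeResponseTypeUserDefined)

    payload = {
        'response_type': 'user_defined',
        'user_defined': {'card': {'title': 'Store hours'}},
    }

    # from_dict raises ValueError if 'response_type' or 'user_defined' is missing.
    model = RuntimeResponseGenericRuntimeResponseTypeUserDefined.from_dict(payload)
    assert model.to_dict() == payload   # to_dict reproduces the JSON shape
    print(model)                        # __str__ pretty-prints via json.dumps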
+ """ + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.channels = channels + self.channel_options = channel_options + self.alt_text = alt_text @classmethod - def _from_dict(cls, _dict): - """Initialize a WorkspaceSystemSettingsTooling object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeVideo': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeVideo object from a json dictionary.""" args = {} - if 'store_generic_responses' in _dict: - args['store_generic_responses'] = _dict.get( - 'store_generic_responses') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeVideo JSON' + ) + if (source := _dict.get('source')) is not None: + args['source'] = source + else: + raise ValueError( + 'Required property \'source\' not present in RuntimeResponseGenericRuntimeResponseTypeVideo JSON' + ) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] + if (channel_options := _dict.get('channel_options')) is not None: + args['channel_options'] = channel_options + if (alt_text := _dict.get('alt_text')) is not None: + args['alt_text'] = alt_text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeVideo object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'store_generic_responses' - ) and self.store_generic_responses is not None: - _dict['store_generic_responses'] = self.store_generic_responses + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list + if hasattr(self, + 'channel_options') and self.channel_options is not None: + _dict['channel_options'] = self.channel_options + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict - def __str__(self): - """Return a `str` version of this WorkspaceSystemSettingsTooling object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeVideo object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeVideo') -> bool: """Return 
`true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeVideo') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other diff --git a/ibm_watson/assistant_v2.py b/ibm_watson/assistant_v2.py index b556d8dbb..72dbd34c3 100644 --- a/ibm_watson/assistant_v2.py +++ b/ibm_watson/assistant_v2.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2019, 2026. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,17 +13,32 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +# IBM OpenAPI SDK Code Generator Version: 3.105.0-3c13b041-20250605-193116 """ -The IBM Watson™ Assistant service combines machine learning, natural language -understanding, and integrated dialog tools to create conversation flows between your apps -and your users. -""" +The IBM® watsonx™ Assistant service combines machine learning, natural language +understanding, and an integrated dialog editor to create conversation flows between your +apps and your users. +The Assistant v2 API provides runtime methods your client application can use to send user +input to an assistant and receive a response. +You need a paid Plus plan or higher to use the watsonx Assistant v2 API. -from __future__ import absolute_import +API Version: 2.0 +See: https://cloud.ibm.com/docs/assistant +""" +from datetime import datetime +from enum import Enum +from typing import BinaryIO, Dict, List, Optional import json +import sys + +from ibm_cloud_sdk_core import BaseService, DetailedResponse +from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator +from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment +from ibm_cloud_sdk_core.utils import convert_model, datetime_to_string, string_to_datetime + from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService ############################################################################## # Service @@ -33,1801 +48,23511 @@ class AssistantV2(BaseService): """The Assistant V2 service.""" - default_url = 'https://gateway.watsonplatform.net/assistant/api' + DEFAULT_SERVICE_URL = 'https://api.us-south.assistant.watson.cloud.ibm.com' + DEFAULT_SERVICE_NAME = 'assistant' def __init__( - self, - version, - url=default_url, - username=None, - password=None, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): + self, + version: str, + authenticator: Authenticator = None, + service_name: str = DEFAULT_SERVICE_NAME, + ) -> None: """ Construct a new client for the Assistant service. - :param str version: The API version date to use with the service, in - "YYYY-MM-DD" format. Whenever the API is changed in a backwards - incompatible way, a new minor version of the API is released. - The service uses the API version for the date you specify, or - the most recent version before that date. Note that you should - not programmatically specify the current date at runtime, in - case the API has been updated since your application's release. 
- Instead, specify a version date that is compatible with your - application, and don't change it until your application is - ready for a later version. - - :param str url: The base url to use when contacting the service (e.g. - "https://gateway.watsonplatform.net/assistant/api/assistant/api"). - The base url may differ between IBM Cloud regions. - - :param str username: The username used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str password: The password used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. - - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. Defaults to - 'https://iam.cloud.ibm.com/identity/token'. - """ - - BaseService.__init__( - self, - vcap_services_name='conversation', - url=url, - username=username, - password=password, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Assistant') + :param str version: Release date of the API version you want to use. + Specify dates in YYYY-MM-DD format. The current version is `2024-08-25`. + + :param Authenticator authenticator: The authenticator specifies the authentication mechanism. + Get up to date information from https://github.com/IBM/python-sdk-core/blob/main/README.md + about initializing the authenticator of your choice. + """ + if version is None: + raise ValueError('version must be provided') + + if not authenticator: + authenticator = get_authenticator_from_environment(service_name) + BaseService.__init__(self, + service_url=self.DEFAULT_SERVICE_URL, + authenticator=authenticator) self.version = version + self.configure_service(service_name) ######################### - # Sessions + # Conversational skill providers ######################### - def create_session(self, assistant_id, **kwargs): + def create_provider( + self, + provider_id: str, + specification: 'ProviderSpecification', + private: 'ProviderPrivate', + **kwargs, + ) -> DetailedResponse: """ - Create a session. + Create a conversational skill provider. - Create a new session. A session is used to send user input to a skill and receive - responses. It also maintains the state of the conversation. + Create a new conversational skill provider. + + :param str provider_id: The unique identifier of the provider. + :param ProviderSpecification specification: The specification of the + provider. + :param ProviderPrivate private: Private information of the provider. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
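The new constructor drops the username/password and iam_apikey arguments in favor of a single `Authenticator` (falling back to `get_authenticator_from_environment` when none is supplied). A minimal construction sketch; the API key, version date, and service URL are placeholders to adapt to your instance:

    from ibm_watson import AssistantV2
    from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

    authenticator = IAMAuthenticator('{apikey}')      # placeholder API key
    assistant = AssistantV2(version='2024-08-25',     # current version per the docstring
                            authenticator=authenticator)
    # DEFAULT_SERVICE_URL points at us-south; override it for other regions.
    assistant.set_service_url('{service_url}')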
+ :rtype: DetailedResponse with `dict` result representing a `ProviderResponse` object + """ + + if provider_id is None: + raise ValueError('provider_id must be provided') + if specification is None: + raise ValueError('specification must be provided') + if private is None: + raise ValueError('private must be provided') + specification = convert_model(specification) + private = convert_model(private) + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_provider', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'provider_id': provider_id, + 'specification': specification, + 'private': private, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v2/providers' + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def list_providers( + self, + *, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + List conversational skill providers. + + List the conversational skill providers associated with a Watson Assistant service + instance. + + :param int page_limit: (optional) The number of records to return in each + page of results. + :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned conversational + skill providers will be sorted. To reverse the sort order, prefix the value + with a minus sign (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `ProviderCollection` object + """ + + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_providers', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'page_limit': page_limit, + 'include_count': include_count, + 'sort': sort, + 'cursor': cursor, + 'include_audit': include_audit, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v2/providers' + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def update_provider( + self, + provider_id: str, + specification: 'ProviderSpecification', + private: 'ProviderPrivate', + **kwargs, + ) -> DetailedResponse: + """ + Update a conversational skill provider. + + Update a new conversational skill provider. 
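A sketch of the pagination options on the new `list_providers` method, assuming `assistant` is the AssistantV2 client from the construction sketch above:

    providers = assistant.list_providers(
        page_limit=10,
        include_count=True,   # adds the `total` property to the `pagination` object
        include_audit=True,   # adds `created` and `updated` timestamps
    ).get_result()
    print(providers)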
+ + :param str provider_id: Unique identifier of the conversational skill + provider. + :param ProviderSpecification specification: The specification of the + provider. + :param ProviderPrivate private: Private information of the provider. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `ProviderResponse` object + """ + + if not provider_id: + raise ValueError('provider_id must be provided') + if specification is None: + raise ValueError('specification must be provided') + if private is None: + raise ValueError('private must be provided') + specification = convert_model(specification) + private = convert_model(private) + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='update_provider', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } - :param str assistant_id: Unique identifier of the assistant. You can find the - assistant ID of an assistant on the **Assistants** tab of the Watson Assistant - tool. For information about creating assistants, see the - [documentation](https://cloud.ibm.com/docs/services/assistant?topic=assistant-assistant-add#assistant-add-task). - **Note:** Currently, the v2 API does not support creating assistants. + data = { + 'specification': specification, + 'private': private, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['provider_id'] + path_param_values = self.encode_path_vars(provider_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/providers/{provider_id}'.format(**path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Assistants + ######################### + + def create_assistant( + self, + *, + language: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create an assistant. + + Create a new assistant. + + :param str language: (optional) The language of the assistant. + :param str name: (optional) The name of the assistant. This string cannot + contain carriage return, newline, or tab characters. + :param str description: (optional) The description of the assistant. This + string cannot contain carriage return, newline, or tab characters. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
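A sketch of `update_provider`, assuming `specification` and `private` are ProviderSpecification and ProviderPrivate models built elsewhere (their fields are not shown in this hunk) and that the provider ID is a placeholder. Note that, per the request construction above, the update is issued as a POST to `/v2/providers/{provider_id}`:

    updated = assistant.update_provider(
        provider_id='my-skill-provider',   # placeholder provider ID
        specification=specification,       # ProviderSpecification built elsewhere
        private=private,                   # ProviderPrivate built elsewhere
    ).get_result()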
+ :rtype: DetailedResponse with `dict` result representing a `AssistantData` object + """ + + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_assistant', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'language': language, + 'name': name, + 'description': description, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v2/assistants' + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def list_assistants( + self, + *, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + List assistants. + + List the assistants associated with a watsonx Assistant service instance. + + :param int page_limit: (optional) The number of records to return in each + page of results. + :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned assistants will + be sorted. To reverse the sort order, prefix the value with a minus sign + (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `AssistantCollection` object + """ + + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_assistants', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'page_limit': page_limit, + 'include_count': include_count, + 'sort': sort, + 'cursor': cursor, + 'include_audit': include_audit, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v2/assistants' + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def delete_assistant( + self, + assistant_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete assistant. + + Delete an assistant. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
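A sketch of the new assistant-management calls, `create_assistant` and `list_assistants`; the name and description values are placeholders:

    created = assistant.create_assistant(
        language='en',
        name='customer-care',   # no carriage return, newline, or tab characters
        description='Handles store-hours questions',
    ).get_result()

    assistants = assistant.list_assistants(page_limit=20,
                                           include_count=True).get_result()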
:rtype: DetailedResponse """ - if assistant_id is None: + if not assistant_id: raise ValueError('assistant_id must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_assistant', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V2', 'create_session') + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id'] + path_param_values = self.encode_path_vars(assistant_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}'.format(**path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Sessions + ######################### + + def create_session( + self, + assistant_id: str, + environment_id: str, + *, + analytics: Optional['RequestAnalytics'] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create a session. + + Create a new session. A session is used to send user input to a skill and receive + responses. It also maintains the state of the conversation. A session persists + until it is deleted, or until it times out because of inactivity. (For more + information, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-assistant-settings).). + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str environment_id: Unique identifier of the environment. To find + the environment ID in the watsonx Assistant user interface, open the + environment settings and click **API Details**. **Note:** Currently, the + API does not support creating environments. + :param RequestAnalytics analytics: (optional) An optional object containing + analytics data. Currently, this data is used only for events sent to the + Segment extension. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
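Sessions are now created under an environment rather than directly under the assistant. A sketch of `create_session`, assuming the SessionResponse result exposes a `session_id` key (not shown in this hunk):

    session = assistant.create_session(
        assistant_id='{assistant_id}',     # Assistant settings > Assistant IDs and API details
        environment_id='{environment_id}',
    ).get_result()
    session_id = session['session_id']     # assumed key on the SessionResponse result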
+ :rtype: DetailedResponse with `dict` result representing a `SessionResponse` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not environment_id: + raise ValueError('environment_id must be provided') + if analytics is not None: + analytics = convert_model(analytics) + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_session', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + } + + data = { + 'analytics': analytics, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' - url = '/v2/assistants/{0}/sessions'.format( - *self._encode_path_vars(assistant_id)) - response = self.request( + path_param_keys = ['assistant_id', 'environment_id'] + path_param_values = self.encode_path_vars(assistant_id, environment_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/environments/{environment_id}/sessions'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def delete_session(self, assistant_id, session_id, **kwargs): + def delete_session( + self, + assistant_id: str, + environment_id: str, + session_id: str, + **kwargs, + ) -> DetailedResponse: """ Delete session. - Deletes a session explicitly before it times out. + Deletes a session explicitly before it times out. (For more information about the + session inactivity timeout, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-assistant-settings)). - :param str assistant_id: Unique identifier of the assistant. You can find the - assistant ID of an assistant on the **Assistants** tab of the Watson Assistant - tool. For information about creating assistants, see the - [documentation](https://cloud.ibm.com/docs/services/assistant?topic=assistant-assistant-add#assistant-add-task). - **Note:** Currently, the v2 API does not support creating assistants. + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str environment_id: Unique identifier of the environment. To find + the environment ID in the watsonx Assistant user interface, open the + environment settings and click **API Details**. **Note:** Currently, the + API does not support creating environments. :param str session_id: Unique identifier of the session. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
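A matching `delete_session` sketch; sessions that are not deleted explicitly time out after the configured inactivity period:

    assistant.delete_session(
        assistant_id='{assistant_id}',
        environment_id='{environment_id}',
        session_id=session_id,             # from the create_session sketch above
    )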
:rtype: DetailedResponse """ - if assistant_id is None: + if not assistant_id: raise ValueError('assistant_id must be provided') - if session_id is None: + if not environment_id: + raise ValueError('environment_id must be provided') + if not session_id: raise ValueError('session_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V2', 'delete_session') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_session', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' - url = '/v2/assistants/{0}/sessions/{1}'.format( - *self._encode_path_vars(assistant_id, session_id)) - response = self.request( + path_param_keys = ['assistant_id', 'environment_id', 'session_id'] + path_param_values = self.encode_path_vars(assistant_id, environment_id, + session_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/environments/{environment_id}/sessions/{session_id}'.format( + **path_param_dict) + request = self.prepare_request( method='DELETE', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response ######################### # Message ######################### - def message(self, - assistant_id, - session_id, - input=None, - context=None, - **kwargs): + def message( + self, + assistant_id: str, + environment_id: str, + session_id: str, + *, + input: Optional['MessageInput'] = None, + context: Optional['MessageContext'] = None, + user_id: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ - Send user input to assistant. + Send user input to assistant (stateful). - Send user input to an assistant and receive a response. - There is no rate limit for this operation. + Send user input to an assistant and receive a response, with conversation state + (including context data) stored by watsonx Assistant for the duration of the + session. - :param str assistant_id: Unique identifier of the assistant. You can find the - assistant ID of an assistant on the **Assistants** tab of the Watson Assistant - tool. For information about creating assistants, see the - [documentation](https://cloud.ibm.com/docs/services/assistant?topic=assistant-assistant-add#assistant-add-task). - **Note:** Currently, the v2 API does not support creating assistants. + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str environment_id: Unique identifier of the environment. To find + the environment ID in the watsonx Assistant user interface, open the + environment settings and click **API Details**. **Note:** Currently, the + API does not support creating environments. :param str session_id: Unique identifier of the session. - :param MessageInput input: An input object that includes the input text. - :param MessageContext context: State information for the conversation. The context - is stored by the assistant on a per-session basis. You can use this property to - set or modify context variables, which can also be accessed by dialog nodes. 
+ :param MessageInput input: (optional) An input object that includes the + input text. + :param MessageContext context: (optional) Context data for the + conversation. You can use this property to set or modify context variables, + which can also be accessed by dialog nodes. The context is stored by the + assistant on a per-session basis. + **Note:** The total size of the context data stored for a stateful session + cannot exceed 100KB. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the assistant. The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. If no value is specified in the input, **user_id** is + automatically set to the value of **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the + global system context. If **user_id** is specified in both locations, the + value specified at the root is used. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `StatefulMessageResponse` object """ - if assistant_id is None: + if not assistant_id: raise ValueError('assistant_id must be provided') - if session_id is None: + if not environment_id: + raise ValueError('environment_id must be provided') + if not session_id: raise ValueError('session_id must be provided') if input is not None: - input = self._convert_model(input, MessageInput) + input = convert_model(input) + if context is not None: + context = convert_model(context) + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='message', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'input': input, + 'context': context, + 'user_id': user_id, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id', 'environment_id', 'session_id'] + path_param_values = self.encode_path_vars(assistant_id, environment_id, + session_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/environments/{environment_id}/sessions/{session_id}/message'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def message_stateless( + self, + assistant_id: str, + environment_id: str, + *, + input: Optional['StatelessMessageInput'] = None, + context: Optional['StatelessMessageContext'] = None, + user_id: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Send user input to assistant (stateless). + + Send user input to an assistant and receive a response, with conversation state + (including context data) managed by your application. + + :param str assistant_id: Unique identifier of the assistant. 
To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str environment_id: Unique identifier of the environment. To find + the environment ID in the watsonx Assistant user interface, open the + environment settings and click **API Details**. **Note:** Currently, the + API does not support creating environments. + :param StatelessMessageInput input: (optional) An input object that + includes the input text. + :param StatelessMessageContext context: (optional) Context data for the + conversation. You can use this property to set or modify context variables, + which can also be accessed by dialog nodes. The context is not stored by + the assistant. To maintain session state, include the context from the + previous response. + **Note:** The total size of the context data for a stateless session cannot + exceed 250KB. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the assistant. The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. If no value is specified in the input, **user_id** is + automatically set to the value of **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the + global system context. If **user_id** is specified in both locations in a + message request, the value specified at the root is used. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
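A sketch of the stateful `message` call shown above. Plain dicts are passed in place of MessageInput (the method runs them through `convert_model`), and the `output.generic` response shape is an assumption not confirmed by this hunk:

    response = assistant.message(
        assistant_id='{assistant_id}',
        environment_id='{environment_id}',
        session_id='{session_id}',
        input={'message_type': 'text', 'text': 'Hello'},   # plain dict in place of MessageInput
        user_id='my-user-id',
    ).get_result()
    # Assumed response shape: text responses arrive under output.generic.
    for generic in response.get('output', {}).get('generic', []):
        if generic.get('response_type') == 'text':
            print(generic['text'])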
+ :rtype: DetailedResponse with `dict` result representing a `StatelessMessageResponse` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not environment_id: + raise ValueError('environment_id must be provided') + if input is not None: + input = convert_model(input) if context is not None: - context = self._convert_model(context, MessageContext) + context = convert_model(context) + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='message_stateless', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'input': input, + 'context': context, + 'user_id': user_id, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id', 'environment_id'] + path_param_values = self.encode_path_vars(assistant_id, environment_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/environments/{environment_id}/message'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Message Stream + ######################### + + def message_stream( + self, + assistant_id: str, + environment_id: str, + session_id: str, + *, + input: Optional['MessageInput'] = None, + context: Optional['MessageContext'] = None, + user_id: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Send user input to assistant (stateful). + + Send user input to an assistant and receive a streamed response, with conversation + state (including context data) stored by watsonx Assistant for the duration of the + session. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str environment_id: Unique identifier of the environment. To find + the environment ID in the watsonx Assistant user interface, open the + environment settings and click **API Details**. **Note:** Currently, the + API does not support creating environments. + :param str session_id: Unique identifier of the session. + :param MessageInput input: (optional) An input object that includes the + input text. + :param MessageContext context: (optional) Context data for the + conversation. You can use this property to set or modify context variables, + which can also be accessed by dialog nodes. The context is stored by the + assistant on a per-session basis. + **Note:** The total size of the context data stored for a stateful session + cannot exceed 100KB. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the assistant. The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. 
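A sketch of the stateless flow: the application carries the conversation state itself by feeding the context from one response into the next request. That the response echoes a `context` object is an assumption based on the docstring above:

    first = assistant.message_stateless(
        assistant_id='{assistant_id}',
        environment_id='{environment_id}',
        input={'message_type': 'text', 'text': 'Hello'},
    ).get_result()

    # State is not stored by the service, so pass the returned context back in.
    follow_up = assistant.message_stateless(
        assistant_id='{assistant_id}',
        environment_id='{environment_id}',
        input={'message_type': 'text', 'text': 'What are your hours?'},
        context=first.get('context'),      # assumed to be echoed in the response
    ).get_result()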
If no value is specified in the input, **user_id** is + automatically set to the value of **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the + global system context. If **user_id** is specified in both locations, the + value specified at the root is used. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `BinaryIO` result + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not environment_id: + raise ValueError('environment_id must be provided') + if not session_id: + raise ValueError('session_id must be provided') + if input is not None: + input = convert_model(input) + if context is not None: + context = convert_model(context) headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='message_stream', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'input': input, + 'context': context, + 'user_id': user_id, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('conversation', 'V2', 'message') + del kwargs['headers'] + headers['Accept'] = 'text/event-stream' + + path_param_keys = ['assistant_id', 'environment_id', 'session_id'] + path_param_values = self.encode_path_vars(assistant_id, environment_id, + session_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/environments/{environment_id}/sessions/{session_id}/message_stream'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def message_stream_stateless( + self, + assistant_id: str, + environment_id: str, + *, + input: Optional['MessageInput'] = None, + context: Optional['MessageContext'] = None, + user_id: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Send user input to assistant (stateless). + + Send user input to an assistant and receive a response, with conversation state + (including context data) managed by your application. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str environment_id: Unique identifier of the environment. To find + the environment ID in the watsonx Assistant user interface, open the + environment settings and click **API Details**. **Note:** Currently, the + API does not support creating environments. + :param MessageInput input: (optional) An input object that includes the + input text. + :param MessageContext context: (optional) Context data for the + conversation. You can use this property to set or modify context variables, + which can also be accessed by dialog nodes. The context is stored by the + assistant on a per-session basis. + **Note:** The total size of the context data stored for a stateful session + cannot exceed 100KB. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the assistant. 
The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. If no value is specified in the input, **user_id** is + automatically set to the value of **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the + global system context. If **user_id** is specified in both locations, the + value specified at the root is used. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `BinaryIO` result + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not environment_id: + raise ValueError('environment_id must be provided') + if input is not None: + input = convert_model(input) + if context is not None: + context = convert_model(context) + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='message_stream_stateless', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + } - data = {'input': input, 'context': context} + data = { + 'input': input, + 'context': context, + 'user_id': user_id, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v2/assistants/{0}/sessions/{1}/message'.format( - *self._encode_path_vars(assistant_id, session_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'text/event-stream' + + path_param_keys = ['assistant_id', 'environment_id'] + path_param_values = self.encode_path_vars(assistant_id, environment_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/environments/{environment_id}/message_stream'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response + ######################### + # Bulk classify + ######################### -############################################################################## -# Models -############################################################################## + def bulk_classify( + self, + skill_id: str, + input: List['BulkClassifyUtterance'], + **kwargs, + ) -> DetailedResponse: + """ + Identify intents and entities in multiple user utterances. + + Send multiple user inputs to a dialog skill in a single request and receive + information about the intents and entities recognized in each input. This method + is useful for testing and comparing the performance of different skills or skill + versions. + This method is available only with Enterprise with Data Isolation plans. + + :param str skill_id: Unique identifier of the skill. To find the action or + dialog skill ID in the watsonx Assistant user interface, open the skill + settings and click **API Details**. To find the search skill ID, use the + Get environment API to retrieve the skill references for an environment and + it will include the search skill info, if available. 
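A consumption sketch for the new streaming endpoints. The request sets `Accept: text/event-stream` and the result is documented only as `BinaryIO`; the line-by-line iteration below assumes the raw result behaves like a requests streaming response, which this hunk does not guarantee:

    stream = assistant.message_stream(
        assistant_id='{assistant_id}',
        environment_id='{environment_id}',
        session_id='{session_id}',
        input={'message_type': 'text', 'text': 'Tell me about your plans'},
    )
    # Assumption: the result exposes iter_lines() like a requests streaming response.
    for line in stream.get_result().iter_lines():
        if line:
            print(line.decode('utf-8'))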
+ :param List[BulkClassifyUtterance] input: An array of input utterances to + classify. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `BulkClassifyResponse` object + """ + + if not skill_id: + raise ValueError('skill_id must be provided') + if input is None: + raise ValueError('input must be provided') + input = [convert_model(x) for x in input] + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='bulk_classify', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'input': input, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['skill_id'] + path_param_values = self.encode_path_vars(skill_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/skills/{skill_id}/workspace/bulk_classify'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Logs + ######################### + + def list_logs( + self, + assistant_id: str, + *, + sort: Optional[str] = None, + filter: Optional[str] = None, + page_limit: Optional[int] = None, + cursor: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + List log events for an assistant. + + List the events from the log of an assistant. + This method requires Manager access. + **Note:** If you use the **cursor** parameter to retrieve results one page at a + time, subsequent requests must be no more than 5 minutes apart. Any returned value + for the **cursor** parameter becomes invalid after 5 minutes. For more information + about using pagination, see [Pagination](#pagination). + + :param str assistant_id: The assistant ID or the environment ID of the + environment where the assistant is deployed. + Set the value for this ID depending on the type of request: + - For message, session, and log requests, specify the environment ID of + the environment where the assistant is deployed. + - For all other requests, specify the assistant ID of the assistant. + To get the **assistant ID** and **environment ID** in the watsonx + Assistant interface, open the **Assistant settings** page, and scroll to + the **Assistant IDs and API details** section and click **View Details**. + **Note:** If you are using the classic Watson Assistant experience, always + use the assistant ID. + To find the **assistant ID** in the user interface, open the **Assistant + settings** and click **API Details**. + :param str sort: (optional) How to sort the returned log events. You can + sort by **request_timestamp**. To reverse the sort order, prefix the + parameter value with a minus sign (`-`). + :param str filter: (optional) A cacheable parameter that limits the results + to those matching the specified filter. For more information, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-filter-reference#filter-reference). + :param int page_limit: (optional) The number of records to return in each + page of results. 
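A sketch of `bulk_classify`, passing the utterances as plain dicts with an assumed `text` field standing in for BulkClassifyUtterance models; the skill ID is a placeholder:

    classified = assistant.bulk_classify(
        skill_id='{skill_id}',
        input=[
            {'text': 'I want to cancel my order'},   # BulkClassifyUtterance as a plain
            {'text': 'What are your hours?'},        # dict with an assumed `text` field
        ],
    ).get_result()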
+ **Note:** If the API is not returning your data, try lowering the + page_limit value. + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `LogCollection` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_logs', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'sort': sort, + 'filter': filter, + 'page_limit': page_limit, + 'cursor': cursor, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id'] + path_param_values = self.encode_path_vars(assistant_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/logs'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # User data + ######################### + + def delete_user_data( + self, + customer_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete labeled data. + + Deletes all data associated with a specified customer ID. The method has no effect + if no data is associated with the customer ID. + You associate a customer ID with data by passing the `X-Watson-Metadata` header + with a request that passes data. For more information about personal data and + customer IDs, see [Information + security](https://cloud.ibm.com/docs/assistant?topic=assistant-information-security#information-security). + **Note:** This operation is intended only for deleting data associated with a + single specific customer, not for deleting data associated with multiple customers + or for any other purpose. For more information, see [Labeling and deleting data in + watsonx + Assistant](https://cloud.ibm.com/docs/assistant?topic=assistant-information-security#information-security-gdpr-wa). + + :param str customer_id: The customer ID for which all data is to be + deleted. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
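A sketch of `list_logs`. Per the docstring above, log requests take the environment ID rather than the assistant ID; the `logs` key on the LogCollection result is an assumption:

    logs = assistant.list_logs(
        assistant_id='{environment_id}',   # per the docstring, pass the environment ID here
        sort='-request_timestamp',         # newest events first
        page_limit=50,
    ).get_result()
    for event in logs.get('logs', []):     # assumed key on the LogCollection result
        print(event)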
+ :rtype: DetailedResponse + """ + + if not customer_id: + raise ValueError('customer_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_user_data', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'customer_id': customer_id, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v2/user_data' + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Environments + ######################### + + def list_environments( + self, + assistant_id: str, + *, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + List environments. + + List the environments associated with an assistant. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param int page_limit: (optional) The number of records to return in each + page of results. + :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned environments + will be sorted. To reverse the sort order, prefix the value with a minus + sign (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `EnvironmentCollection` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_environments', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'page_limit': page_limit, + 'include_count': include_count, + 'sort': sort, + 'cursor': cursor, + 'include_audit': include_audit, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id'] + path_param_values = self.encode_path_vars(assistant_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/environments'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def get_environment( + self, + assistant_id: str, + environment_id: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Get environment. 
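# Illustrative sketch for the environment operations above: list an assistant's
# environments, then fetch one by ID. Credentials, version date, and IDs are
# placeholders; the 'environments' field name is assumed from the
# EnvironmentCollection model referenced in the docstring.
from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV2(version='2023-06-15',
                        authenticator=IAMAuthenticator('<apikey>'))
assistant.set_service_url('<service-url>')

environments = assistant.list_environments(
    assistant_id='<assistant-id>',
    include_count=True,
).get_result()
for env in environments.get('environments', []):
    print(env.get('name'), env.get('environment_id'))

# Fetch a single environment, including its audit timestamps.
environment = assistant.get_environment(
    assistant_id='<assistant-id>',
    environment_id='<environment-id>',
    include_audit=True,
).get_result()
print(environment.get('name'))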
+ + Get information about an environment. For more information about environments, see + [Environments](https://cloud.ibm.com/docs/watson-assistant?topic=watson-assistant-publish-overview#environments). + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str environment_id: Unique identifier of the environment. To find + the environment ID in the watsonx Assistant user interface, open the + environment settings and click **API Details**. **Note:** Currently, the + API does not support creating environments. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Environment` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not environment_id: + raise ValueError('environment_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_environment', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id', 'environment_id'] + path_param_values = self.encode_path_vars(assistant_id, environment_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/environments/{environment_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def update_environment( + self, + assistant_id: str, + environment_id: str, + *, + name: Optional[str] = None, + description: Optional[str] = None, + orchestration: Optional['UpdateEnvironmentOrchestration'] = None, + session_timeout: Optional[int] = None, + skill_references: Optional[List['EnvironmentSkill']] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update environment. + + Update an environment with new or modified data. For more information about + environments, see + [Environments](https://cloud.ibm.com/docs/watson-assistant?topic=watson-assistant-publish-overview#environments). + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str environment_id: Unique identifier of the environment. To find + the environment ID in the watsonx Assistant user interface, open the + environment settings and click **API Details**. **Note:** Currently, the + API does not support creating environments. + :param str name: (optional) The name of the environment. + :param str description: (optional) The description of the environment. + :param UpdateEnvironmentOrchestration orchestration: (optional) The search + skill orchestration settings for the environment. 
+ :param int session_timeout: (optional) The session inactivity timeout + setting for the environment (in seconds). + :param List[EnvironmentSkill] skill_references: (optional) An array of + objects identifying the skills (such as action and dialog) that exist in + the environment. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Environment` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not environment_id: + raise ValueError('environment_id must be provided') + if orchestration is not None: + orchestration = convert_model(orchestration) + if skill_references is not None: + skill_references = [convert_model(x) for x in skill_references] + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='update_environment', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'name': name, + 'description': description, + 'orchestration': orchestration, + 'session_timeout': session_timeout, + 'skill_references': skill_references, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id', 'environment_id'] + path_param_values = self.encode_path_vars(assistant_id, environment_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/environments/{environment_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Releases + ######################### + + def create_release( + self, + assistant_id: str, + *, + description: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create release. + + Create a new release using the current content of the dialog and action skills in + the draft environment. (In the watsonx Assistant user interface, a release is + called a *version*.). + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str description: (optional) The description of the release. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
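# Illustrative sketch for the update_environment operation implemented above:
# rename a live environment and adjust its session inactivity timeout. Only the
# fields being changed need to be supplied; credentials and IDs are placeholders.
from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV2(version='2023-06-15',
                        authenticator=IAMAuthenticator('<apikey>'))
assistant.set_service_url('<service-url>')

updated = assistant.update_environment(
    assistant_id='<assistant-id>',
    environment_id='<environment-id>',
    name='production',
    description='Customer-facing deployment',
    session_timeout=3600,  # seconds of inactivity before a session expires
).get_result()
print(updated)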
+ :rtype: DetailedResponse with `dict` result representing a `Release` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_release', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'description': description, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id'] + path_param_values = self.encode_path_vars(assistant_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/releases'.format(**path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def list_releases( + self, + assistant_id: str, + *, + page_limit: Optional[int] = None, + include_count: Optional[bool] = None, + sort: Optional[str] = None, + cursor: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + List releases. + + List the releases associated with an assistant. (In the watsonx Assistant user + interface, a release is called a *version*.). + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param int page_limit: (optional) The number of records to return in each + page of results. + :param bool include_count: (optional) Whether to include information about + the number of records that satisfy the request, regardless of the page + limit. If this parameter is `true`, the `pagination` object in the response + includes the `total` property. + :param str sort: (optional) The attribute by which returned workspaces will + be sorted. To reverse the sort order, prefix the value with a minus sign + (`-`). + :param str cursor: (optional) A token identifying the page of results to + retrieve. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
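# Illustrative sketch for the release operations above: create a release from
# the current draft content, then list all releases. Credentials and the
# assistant ID are placeholders; the 'releases', 'release', and 'status' field
# names are assumed from the Release and ReleaseCollection models.
from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV2(version='2023-06-15',
                        authenticator=IAMAuthenticator('<apikey>'))
assistant.set_service_url('<service-url>')

new_release = assistant.create_release(
    assistant_id='<assistant-id>',
    description='Snapshot before the holiday campaign',
).get_result()
print(new_release.get('release'), new_release.get('status'))

releases = assistant.list_releases(
    assistant_id='<assistant-id>',
    sort='-updated',
    include_count=True,
).get_result()
for release in releases.get('releases', []):
    print(release.get('release'), release.get('status'))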
+ :rtype: DetailedResponse with `dict` result representing a `ReleaseCollection` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_releases', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'page_limit': page_limit, + 'include_count': include_count, + 'sort': sort, + 'cursor': cursor, + 'include_audit': include_audit, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id'] + path_param_values = self.encode_path_vars(assistant_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/releases'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def get_release( + self, + assistant_id: str, + release: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Get release. + + Get information about a release. + Release data is not available until publishing of the release completes. If + publishing is still in progress, you can continue to poll by calling the same + request again and checking the value of the **status** property. When processing + has completed, the request returns the release data. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str release: Unique identifier of the release. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Release` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not release: + raise ValueError('release must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_release', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id', 'release'] + path_param_values = self.encode_path_vars(assistant_id, release) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/releases/{release}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def delete_release( + self, + assistant_id: str, + release: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete release. + + Delete a release. (In the watsonx Assistant user interface, a release is called a + *version*.). + + :param str assistant_id: Unique identifier of the assistant. 
To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str release: Unique identifier of the release. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not release: + raise ValueError('release must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_release', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id', 'release'] + path_param_values = self.encode_path_vars(assistant_id, release) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/releases/{release}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def deploy_release( + self, + assistant_id: str, + release: str, + environment_id: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Deploy release. + + Update the environment with the content of the release. All snapshots saved as + part of the release become active in the environment. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str release: Unique identifier of the release. + :param str environment_id: The environment ID of the environment where the + release is to be deployed. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
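# Illustrative sketch for get_release and delete_release above. As the
# get_release docstring notes, release data is returned only once publishing
# completes, so the status property is polled here. Credentials, the assistant
# ID, the release identifier, the 'Available' status value, and the polling
# interval are placeholders or assumptions.
import time

from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV2(version='2023-06-15',
                        authenticator=IAMAuthenticator('<apikey>'))
assistant.set_service_url('<service-url>')

while True:
    release = assistant.get_release(
        assistant_id='<assistant-id>',
        release='1',
    ).get_result()
    if release.get('status') == 'Available':
        break
    time.sleep(5)  # simple fixed delay between polls

# Remove a release that is no longer needed.
assistant.delete_release(assistant_id='<assistant-id>', release='1')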
+ :rtype: DetailedResponse with `dict` result representing a `Environment` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not release: + raise ValueError('release must be provided') + if environment_id is None: + raise ValueError('environment_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='deploy_release', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + } + + data = { + 'environment_id': environment_id, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id', 'release'] + path_param_values = self.encode_path_vars(assistant_id, release) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/releases/{release}/deploy'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def create_release_export( + self, + assistant_id: str, + release: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create release export. + + Initiate an asynchronous process which will create a downloadable Zip file + artifact (/package) for an assistant release. This artifact will contain Action + and/or Dialog skills that are part of the release. The Dialog skill will only be + included in the event that coexistence is enabled on the assistant. The expected + workflow with the use of Release Export endpoint is to first initiate the creation + of the artifact with the POST endpoint and then poll the GET endpoint to retrieve + the artifact. Once the artifact has been created, it will last for the duration + (/scope) of the release. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str release: Unique identifier of the release. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
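# Illustrative sketch for the deploy_release operation implemented above:
# promote an existing release into a target environment. Credentials and IDs
# are placeholders.
from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV2(version='2023-06-15',
                        authenticator=IAMAuthenticator('<apikey>'))
assistant.set_service_url('<service-url>')

environment = assistant.deploy_release(
    assistant_id='<assistant-id>',
    release='1',
    environment_id='<environment-id>',
    include_audit=True,
).get_result()
print(environment.get('name'), environment.get('environment_id'))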
+ :rtype: DetailedResponse with `dict` result representing a `CreateReleaseExportWithStatusErrors` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not release: + raise ValueError('release must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_release_export', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id', 'release'] + path_param_values = self.encode_path_vars(assistant_id, release) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/releases/{release}/export'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def download_release_export( + self, + assistant_id: str, + release: str, + *, + accept: Optional[str] = None, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Get release export. + + A dual function endpoint to either retrieve the Zip file artifact that is + associated with an assistant release or, retrieve the status of the artifact's + creation. It is assumed that the artifact creation was already initiated prior to + calling this endpoint. In the event that the artifact is not yet created and ready + for download, this endpoint can be used to poll the system until the creation is + completed or has failed. On the other hand, if the artifact is created, this + endpoint will return the Zip file artifact as an octet stream. Once the artifact + has been created, it will last for the duration (/scope) of the release.

+ After you have downloaded the Zip file artifact, you can import it into an
+ assistant's draft environment in one of three ways:
+ 1. Import the Zip package in the tooling via "Assistant Settings" ->
+ "Download/Upload files" -> "Upload" -> "Assistant only".
+ 2. Import the Zip package via the "Create release import" endpoint.
+ 3. Extract the contents of the Zip file artifact and import the individual
+ skill JSONs via the skill update endpoints.
. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str release: Unique identifier of the release. + :param str accept: (optional) The type of the response: application/json or + application/octet-stream. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `CreateReleaseExportWithStatusErrors` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not release: + raise ValueError('release must be provided') + headers = { + 'Accept': accept, + } + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='download_release_export', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['assistant_id', 'release'] + path_param_values = self.encode_path_vars(assistant_id, release) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/releases/{release}/export'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def create_release_import( + self, + assistant_id: str, + body: BinaryIO, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create release import. + + Import a previously exported assistant release Zip file artifact (/package) into + an assistant. This endpoint creates (/initiates) an asynchronous task (/job) in + the background which will import the artifact contents into the draft environment + of the assistant on which this endpoint is called. Specifically, the asynchronous + operation will override the action and/or dialog skills in the assistant. It will + be worth noting that when the artifact that is provided to this endpoint is from + an assistant release which has coexistence enabled (i.e., it has both action and + dialog skills), the import process will automatically enable coexistence, if not + already enabled, on the assistant into which said artifact is being uploaded to. + On the other hand, if the artifact package being imported only has action skill in + it, the import asynchronous process will only override the draft environment's + action skill, regardless of whether coexistence is enabled on the assistant into + which the package is being imported. Lastly, the system will only run one + asynchronous import at a time on an assistant. As such, consecutive imports will + override previous import's updates to the skills in the draft environment. Once + created, you may poll the completion of the import via the "Get release import + Status" endpoint. + + :param str assistant_id: Unique identifier of the assistant. 
To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param BinaryIO body: Request body is an Octet-stream of the artifact Zip + file that is being imported. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `CreateAssistantReleaseImportResponse` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if body is None: + raise ValueError('body must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_release_import', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + } + + data = body + headers['content-type'] = 'application/octet-stream' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id'] + path_param_values = self.encode_path_vars(assistant_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/import'.format(**path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def get_release_import_status( + self, + assistant_id: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Get release import Status. + + Monitor the status of an assistant release import. You may poll this endpoint + until the status of the import has either succeeded or failed. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
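# Illustrative sketch of the release export/import workflow described above:
# initiate an export, poll the download endpoint until the artifact is ready,
# save the Zip, then import it into another assistant and check the import
# status. Credentials, IDs, file names, and the 'Available' status value are
# placeholders or assumptions, and treating the non-JSON download result as a
# raw HTTP response whose .content holds the Zip bytes is an assumption about
# how the core SDK handles octet-stream responses.
import time

from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV2(version='2023-06-15',
                        authenticator=IAMAuthenticator('<apikey>'))
assistant.set_service_url('<service-url>')

assistant.create_release_export(assistant_id='<assistant-id>', release='1')

# Poll the export status as JSON until the artifact has been created.
while True:
    status = assistant.download_release_export(
        assistant_id='<assistant-id>',
        release='1',
        accept='application/json',
    ).get_result()
    if status.get('status') == 'Available':
        break
    time.sleep(5)

# Retrieve the Zip artifact itself as an octet stream and save it to disk.
export = assistant.download_release_export(
    assistant_id='<assistant-id>',
    release='1',
    accept='application/octet-stream',
).get_result()
with open('release_export.zip', 'wb') as zip_file:
    zip_file.write(export.content)

# Import the package into the draft environment of a second assistant, then
# poll the import status.
with open('release_export.zip', 'rb') as zip_file:
    assistant.create_release_import(assistant_id='<target-assistant-id>',
                                    body=zip_file)
import_status = assistant.get_release_import_status(
    assistant_id='<target-assistant-id>').get_result()
print(import_status)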
+ :rtype: DetailedResponse with `dict` result representing a `MonitorAssistantReleaseImportArtifactResponse` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_release_import_status', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id'] + path_param_values = self.encode_path_vars(assistant_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/import'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Skills + ######################### + + def get_skill( + self, + assistant_id: str, + skill_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get skill. + + Get information about a skill. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str skill_id: Unique identifier of the skill. To find the action or + dialog skill ID in the watsonx Assistant user interface, open the skill + settings and click **API Details**. To find the search skill ID, use the + Get environment API to retrieve the skill references for an environment and + it will include the search skill info, if available. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Skill` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not skill_id: + raise ValueError('skill_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_skill', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id', 'skill_id'] + path_param_values = self.encode_path_vars(assistant_id, skill_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/skills/{skill_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def update_skill( + self, + assistant_id: str, + skill_id: str, + *, + name: Optional[str] = None, + description: Optional[str] = None, + workspace: Optional[dict] = None, + dialog_settings: Optional[dict] = None, + search_settings: Optional['SearchSettings'] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update skill. + + Update a skill with new or modified data. 
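# Illustrative sketch for the get_skill operation implemented above. Credentials
# and IDs are placeholders; the skill ID can be found via the skill settings or
# the Get environment response, as the docstring describes.
from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV2(version='2023-06-15',
                        authenticator=IAMAuthenticator('<apikey>'))
assistant.set_service_url('<service-url>')

skill = assistant.get_skill(
    assistant_id='<assistant-id>',
    skill_id='<skill-id>',
).get_result()
print(skill.get('name'), skill.get('status'))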
+ **Note:** The update is performed asynchronously; you can see the status of the + update by calling the **Get skill** method and checking the value of the + **status** property. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param str skill_id: Unique identifier of the skill. To find the action or + dialog skill ID in the watsonx Assistant user interface, open the skill + settings and click **API Details**. To find the search skill ID, use the + Get environment API to retrieve the skill references for an environment and + it will include the search skill info, if available. + :param str name: (optional) The name of the skill. This string cannot + contain carriage return, newline, or tab characters. + :param str description: (optional) The description of the skill. This + string cannot contain carriage return, newline, or tab characters. + :param dict workspace: (optional) An object containing the conversational + content of an action or dialog skill. + :param dict dialog_settings: (optional) For internal use only. + :param SearchSettings search_settings: (optional) An object describing the + search skill configuration. + **Note:** Search settings are not supported in **Import skills** requests, + and are not included in **Export skills** responses. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Skill` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if not skill_id: + raise ValueError('skill_id must be provided') + if search_settings is not None: + search_settings = convert_model(search_settings) + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='update_skill', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'name': name, + 'description': description, + 'workspace': workspace, + 'dialog_settings': dialog_settings, + 'search_settings': search_settings, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id', 'skill_id'] + path_param_values = self.encode_path_vars(assistant_id, skill_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/skills/{skill_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def export_skills( + self, + assistant_id: str, + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Export skills. + + Asynchronously export the action skill and dialog skill (if enabled) for the + assistant. Use this method to save all skill data from the draft environment so + that you can import it to a different assistant using the **Import skills** + method. Use `assistant_id` instead of `environment_id` to call this endpoint. 
+ A successful call to this method only initiates an asynchronous export. The + exported JSON data is not available until processing completes. + After the initial request is submitted, you can poll the status of the operation + by calling the same request again and checking the value of the **status** + property. If an error occurs (indicated by a **status** value of `Failed`), the + `status_description` property provides more information about the error, and the + `status_errors` property contains an array of error messages that caused the + failure. + When processing has completed, the request returns the exported JSON data. + Remember that the usual rate limits apply. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `SkillsExport` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='export_skills', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id'] + path_param_values = self.encode_path_vars(assistant_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/skills_export'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def import_skills( + self, + assistant_id: str, + assistant_skills: List['SkillImport'], + assistant_state: 'AssistantState', + *, + include_audit: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Import skills. + + Asynchronously import skills into an existing assistant from a previously exported + file. This method only imports assistants into a draft environment. Use + `assistant_id` instead of `environment_id` to call this endpoint. + The request body for this method should contain the response data that was + received from a previous call to the **Export skills** method, without + modification. + A successful call to this method initiates an asynchronous import. The updated + skills belonging to the assistant are not available until processing completes. To + check the status of the asynchronous import operation, use the **Get status of + skills import** method. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param List[SkillImport] assistant_skills: An array of objects describing + the skills for the assistant. Included in responses only if + **status**=`Available`. 
+ :param AssistantState assistant_state: Status information about the skills + for the assistant. Included in responses only if **status**=`Available`. + :param bool include_audit: (optional) Whether to include the audit + properties (`created` and `updated` timestamps) in the response. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `SkillsAsyncRequestStatus` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + if assistant_skills is None: + raise ValueError('assistant_skills must be provided') + if assistant_state is None: + raise ValueError('assistant_state must be provided') + assistant_skills = [convert_model(x) for x in assistant_skills] + assistant_state = convert_model(assistant_state) + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='import_skills', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'include_audit': include_audit, + } + + data = { + 'assistant_skills': assistant_skills, + 'assistant_state': assistant_state, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id'] + path_param_values = self.encode_path_vars(assistant_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/skills_import'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def import_skills_status( + self, + assistant_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get status of skills import. + + Retrieve the status of an asynchronous import operation previously initiated by + using the **Import skills** method. + + :param str assistant_id: Unique identifier of the assistant. To get the + **assistant ID** in the watsonx Assistant interface, open the **Assistant + settings** page, and scroll to the **Assistant IDs and API details** + section and click **View Details**. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
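# Illustrative sketch of the skills export/import round trip described above:
# export the skills of one assistant, feed the response data, unmodified, into
# import_skills on another, then check the import status. Credentials and IDs
# are placeholders; the 'assistant_skills' and 'assistant_state' keys follow the
# SkillsExport model referenced in the docstrings.
from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV2(version='2023-06-15',
                        authenticator=IAMAuthenticator('<apikey>'))
assistant.set_service_url('<service-url>')

# export_skills is asynchronous; in practice, poll it until its status is
# Available before reusing the response data.
export = assistant.export_skills(
    assistant_id='<source-assistant-id>').get_result()

assistant.import_skills(
    assistant_id='<target-assistant-id>',
    assistant_skills=export['assistant_skills'],
    assistant_state=export['assistant_state'],
)

status = assistant.import_skills_status(
    assistant_id='<target-assistant-id>').get_result()
print(status.get('status'))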
+ :rtype: DetailedResponse with `dict` result representing a `SkillsAsyncRequestStatus` object + """ + + if not assistant_id: + raise ValueError('assistant_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='import_skills_status', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['assistant_id'] + path_param_values = self.encode_path_vars(assistant_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/assistants/{assistant_id}/skills_import/status'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + +class ListProvidersEnums: + """ + Enums for list_providers parameters. + """ + + class Sort(str, Enum): + """ + The attribute by which returned conversational skill providers will be sorted. To + reverse the sort order, prefix the value with a minus sign (`-`). + """ + + NAME = 'name' + UPDATED = 'updated' + + +class ListAssistantsEnums: + """ + Enums for list_assistants parameters. + """ + + class Sort(str, Enum): + """ + The attribute by which returned assistants will be sorted. To reverse the sort + order, prefix the value with a minus sign (`-`). + """ + + NAME = 'name' + UPDATED = 'updated' + + +class ListEnvironmentsEnums: + """ + Enums for list_environments parameters. + """ + + class Sort(str, Enum): + """ + The attribute by which returned environments will be sorted. To reverse the sort + order, prefix the value with a minus sign (`-`). + """ + + NAME = 'name' + UPDATED = 'updated' + + +class ListReleasesEnums: + """ + Enums for list_releases parameters. + """ + + class Sort(str, Enum): + """ + The attribute by which returned workspaces will be sorted. To reverse the sort + order, prefix the value with a minus sign (`-`). + """ + + NAME = 'name' + UPDATED = 'updated' + + +class DownloadReleaseExportEnums: + """ + Enums for download_release_export parameters. + """ + + class Accept(str, Enum): + """ + The type of the response: application/json or application/octet-stream. + """ + + APPLICATION_JSON = 'application/json' + APPLICATION_OCTET_STREAM = 'application/octet-stream' + + +############################################################################## +# Models +############################################################################## + + +class AgentAvailabilityMessage: + """ + AgentAvailabilityMessage. + + :param str message: (optional) The text of the message. + """ + + def __init__( + self, + *, + message: Optional[str] = None, + ) -> None: + """ + Initialize a AgentAvailabilityMessage object. + + :param str message: (optional) The text of the message. 
+ """ + self.message = message + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AgentAvailabilityMessage': + """Initialize a AgentAvailabilityMessage object from a json dictionary.""" + args = {} + if (message := _dict.get('message')) is not None: + args['message'] = message + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AgentAvailabilityMessage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'message') and self.message is not None: + _dict['message'] = self.message + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AgentAvailabilityMessage object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AgentAvailabilityMessage') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AgentAvailabilityMessage') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AssistantCollection: + """ + AssistantCollection. + + :param List[AssistantData] assistants: An array of objects describing the + assistants associated with the instance. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). + """ + + def __init__( + self, + assistants: List['AssistantData'], + pagination: 'Pagination', + ) -> None: + """ + Initialize a AssistantCollection object. + + :param List[AssistantData] assistants: An array of objects describing the + assistants associated with the instance. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). 
+ """ + self.assistants = assistants + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AssistantCollection': + """Initialize a AssistantCollection object from a json dictionary.""" + args = {} + if (assistants := _dict.get('assistants')) is not None: + args['assistants'] = [ + AssistantData.from_dict(v) for v in assistants + ] + else: + raise ValueError( + 'Required property \'assistants\' not present in AssistantCollection JSON' + ) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in AssistantCollection JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AssistantCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'assistants') and self.assistants is not None: + assistants_list = [] + for v in self.assistants: + if isinstance(v, dict): + assistants_list.append(v) + else: + assistants_list.append(v.to_dict()) + _dict['assistants'] = assistants_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AssistantCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AssistantCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AssistantCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AssistantData: + """ + AssistantData. + + :param str assistant_id: (optional) The unique identifier of the assistant. + :param str name: (optional) The name of the assistant. This string cannot + contain carriage return, newline, or tab characters. + :param str description: (optional) The description of the assistant. This string + cannot contain carriage return, newline, or tab characters. + :param str language: The language of the assistant. + :param List[AssistantSkill] assistant_skills: (optional) An array of skill + references identifying the skills associated with the assistant. + :param List[EnvironmentReference] assistant_environments: (optional) An array of + objects describing the environments defined for the assistant. + """ + + def __init__( + self, + language: str, + *, + assistant_id: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + assistant_skills: Optional[List['AssistantSkill']] = None, + assistant_environments: Optional[List['EnvironmentReference']] = None, + ) -> None: + """ + Initialize a AssistantData object. + + :param str language: The language of the assistant. + :param str name: (optional) The name of the assistant. This string cannot + contain carriage return, newline, or tab characters. + :param str description: (optional) The description of the assistant. This + string cannot contain carriage return, newline, or tab characters. 
+ """ + self.assistant_id = assistant_id + self.name = name + self.description = description + self.language = language + self.assistant_skills = assistant_skills + self.assistant_environments = assistant_environments + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AssistantData': + """Initialize a AssistantData object from a json dictionary.""" + args = {} + if (assistant_id := _dict.get('assistant_id')) is not None: + args['assistant_id'] = assistant_id + if (name := _dict.get('name')) is not None: + args['name'] = name + if (description := _dict.get('description')) is not None: + args['description'] = description + if (language := _dict.get('language')) is not None: + args['language'] = language + else: + raise ValueError( + 'Required property \'language\' not present in AssistantData JSON' + ) + if (assistant_skills := _dict.get('assistant_skills')) is not None: + args['assistant_skills'] = [ + AssistantSkill.from_dict(v) for v in assistant_skills + ] + if (assistant_environments := + _dict.get('assistant_environments')) is not None: + args['assistant_environments'] = [ + EnvironmentReference.from_dict(v) + for v in assistant_environments + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AssistantData object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'assistant_id') and getattr( + self, 'assistant_id') is not None: + _dict['assistant_id'] = getattr(self, 'assistant_id') + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'assistant_skills') and getattr( + self, 'assistant_skills') is not None: + assistant_skills_list = [] + for v in getattr(self, 'assistant_skills'): + if isinstance(v, dict): + assistant_skills_list.append(v) + else: + assistant_skills_list.append(v.to_dict()) + _dict['assistant_skills'] = assistant_skills_list + if hasattr(self, 'assistant_environments') and getattr( + self, 'assistant_environments') is not None: + assistant_environments_list = [] + for v in getattr(self, 'assistant_environments'): + if isinstance(v, dict): + assistant_environments_list.append(v) + else: + assistant_environments_list.append(v.to_dict()) + _dict['assistant_environments'] = assistant_environments_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AssistantData object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AssistantData') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AssistantData') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AssistantSkill: + """ + AssistantSkill. + + :param str skill_id: The skill ID of the skill. + :param str type: (optional) The type of the skill. + """ + + def __init__( + self, + skill_id: str, + *, + type: Optional[str] = None, + ) -> None: + """ + Initialize a AssistantSkill object. 
+ + :param str skill_id: The skill ID of the skill. + :param str type: (optional) The type of the skill. + """ + self.skill_id = skill_id + self.type = type + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AssistantSkill': + """Initialize a AssistantSkill object from a json dictionary.""" + args = {} + if (skill_id := _dict.get('skill_id')) is not None: + args['skill_id'] = skill_id + else: + raise ValueError( + 'Required property \'skill_id\' not present in AssistantSkill JSON' + ) + if (type := _dict.get('type')) is not None: + args['type'] = type + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AssistantSkill object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'skill_id') and self.skill_id is not None: + _dict['skill_id'] = self.skill_id + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AssistantSkill object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AssistantSkill') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AssistantSkill') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of the skill. + """ + + DIALOG = 'dialog' + ACTION = 'action' + SEARCH = 'search' + + +class AssistantState: + """ + Status information about the skills for the assistant. Included in responses only if + **status**=`Available`. + + :param bool action_disabled: Whether the action skill is disabled in the draft + environment. + :param bool dialog_disabled: Whether the dialog skill is disabled in the draft + environment. + """ + + def __init__( + self, + action_disabled: bool, + dialog_disabled: bool, + ) -> None: + """ + Initialize a AssistantState object. + + :param bool action_disabled: Whether the action skill is disabled in the + draft environment. + :param bool dialog_disabled: Whether the dialog skill is disabled in the + draft environment. 
+ """ + self.action_disabled = action_disabled + self.dialog_disabled = dialog_disabled + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AssistantState': + """Initialize a AssistantState object from a json dictionary.""" + args = {} + if (action_disabled := _dict.get('action_disabled')) is not None: + args['action_disabled'] = action_disabled + else: + raise ValueError( + 'Required property \'action_disabled\' not present in AssistantState JSON' + ) + if (dialog_disabled := _dict.get('dialog_disabled')) is not None: + args['dialog_disabled'] = dialog_disabled + else: + raise ValueError( + 'Required property \'dialog_disabled\' not present in AssistantState JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AssistantState object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'action_disabled') and self.action_disabled is not None: + _dict['action_disabled'] = self.action_disabled + if hasattr(self, + 'dialog_disabled') and self.dialog_disabled is not None: + _dict['dialog_disabled'] = self.dialog_disabled + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AssistantState object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AssistantState') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AssistantState') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class BaseEnvironmentOrchestration: + """ + The search skill orchestration settings for the environment. + + :param bool search_skill_fallback: (optional) Whether to fall back to a search + skill when responding to messages that do not match any intent or action defined + in dialog or action skills. (If no search skill is configured for the + environment, this property is ignored.). + """ + + def __init__( + self, + *, + search_skill_fallback: Optional[bool] = None, + ) -> None: + """ + Initialize a BaseEnvironmentOrchestration object. + + :param bool search_skill_fallback: (optional) Whether to fall back to a + search skill when responding to messages that do not match any intent or + action defined in dialog or action skills. (If no search skill is + configured for the environment, this property is ignored.). 
+ """ + self.search_skill_fallback = search_skill_fallback + + @classmethod + def from_dict(cls, _dict: Dict) -> 'BaseEnvironmentOrchestration': + """Initialize a BaseEnvironmentOrchestration object from a json dictionary.""" + args = {} + if (search_skill_fallback := + _dict.get('search_skill_fallback')) is not None: + args['search_skill_fallback'] = search_skill_fallback + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a BaseEnvironmentOrchestration object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'search_skill_fallback' + ) and self.search_skill_fallback is not None: + _dict['search_skill_fallback'] = self.search_skill_fallback + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this BaseEnvironmentOrchestration object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'BaseEnvironmentOrchestration') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'BaseEnvironmentOrchestration') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class BaseEnvironmentReleaseReference: + """ + An object describing the release that is currently deployed in the environment. + + :param str release: (optional) The name of the deployed release. + """ + + def __init__( + self, + *, + release: Optional[str] = None, + ) -> None: + """ + Initialize a BaseEnvironmentReleaseReference object. + + :param str release: (optional) The name of the deployed release. + """ + self.release = release + + @classmethod + def from_dict(cls, _dict: Dict) -> 'BaseEnvironmentReleaseReference': + """Initialize a BaseEnvironmentReleaseReference object from a json dictionary.""" + args = {} + if (release := _dict.get('release')) is not None: + args['release'] = release + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a BaseEnvironmentReleaseReference object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'release') and self.release is not None: + _dict['release'] = self.release + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this BaseEnvironmentReleaseReference object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'BaseEnvironmentReleaseReference') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'BaseEnvironmentReleaseReference') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class BulkClassifyOutput: + """ + BulkClassifyOutput. + + :param BulkClassifyUtterance input: (optional) The user input utterance to + classify. + :param List[RuntimeEntity] entities: (optional) An array of entities identified + in the utterance. 
+ :param List[RuntimeIntent] intents: (optional) An array of intents recognized in + the utterance. + """ + + def __init__( + self, + *, + input: Optional['BulkClassifyUtterance'] = None, + entities: Optional[List['RuntimeEntity']] = None, + intents: Optional[List['RuntimeIntent']] = None, + ) -> None: + """ + Initialize a BulkClassifyOutput object. + + :param BulkClassifyUtterance input: (optional) The user input utterance to + classify. + :param List[RuntimeEntity] entities: (optional) An array of entities + identified in the utterance. + :param List[RuntimeIntent] intents: (optional) An array of intents + recognized in the utterance. + """ + self.input = input + self.entities = entities + self.intents = intents + + @classmethod + def from_dict(cls, _dict: Dict) -> 'BulkClassifyOutput': + """Initialize a BulkClassifyOutput object from a json dictionary.""" + args = {} + if (input := _dict.get('input')) is not None: + args['input'] = BulkClassifyUtterance.from_dict(input) + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a BulkClassifyOutput object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'input') and self.input is not None: + if isinstance(self.input, dict): + _dict['input'] = self.input + else: + _dict['input'] = self.input.to_dict() + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this BulkClassifyOutput object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'BulkClassifyOutput') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'BulkClassifyOutput') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class BulkClassifyResponse: + """ + BulkClassifyResponse. + + :param List[BulkClassifyOutput] output: (optional) An array of objects that + contain classification information for the submitted input utterances. + """ + + def __init__( + self, + *, + output: Optional[List['BulkClassifyOutput']] = None, + ) -> None: + """ + Initialize a BulkClassifyResponse object. + + :param List[BulkClassifyOutput] output: (optional) An array of objects that + contain classification information for the submitted input utterances. 
+ """ + self.output = output + + @classmethod + def from_dict(cls, _dict: Dict) -> 'BulkClassifyResponse': + """Initialize a BulkClassifyResponse object from a json dictionary.""" + args = {} + if (output := _dict.get('output')) is not None: + args['output'] = [BulkClassifyOutput.from_dict(v) for v in output] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a BulkClassifyResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'output') and self.output is not None: + output_list = [] + for v in self.output: + if isinstance(v, dict): + output_list.append(v) + else: + output_list.append(v.to_dict()) + _dict['output'] = output_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this BulkClassifyResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'BulkClassifyResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'BulkClassifyResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class BulkClassifyUtterance: + """ + The user input utterance to classify. + + :param str text: The text of the input utterance. + """ + + def __init__( + self, + text: str, + ) -> None: + """ + Initialize a BulkClassifyUtterance object. + + :param str text: The text of the input utterance. + """ + self.text = text + + @classmethod + def from_dict(cls, _dict: Dict) -> 'BulkClassifyUtterance': + """Initialize a BulkClassifyUtterance object from a json dictionary.""" + args = {} + if (text := _dict.get('text')) is not None: + args['text'] = text + else: + raise ValueError( + 'Required property \'text\' not present in BulkClassifyUtterance JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a BulkClassifyUtterance object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this BulkClassifyUtterance object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'BulkClassifyUtterance') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'BulkClassifyUtterance') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CaptureGroup: + """ + CaptureGroup. + + :param str group: A recognized capture group for the entity. + :param List[int] location: (optional) Zero-based character offsets that indicate + where the entity value begins and ends in the input text. + """ + + def __init__( + self, + group: str, + *, + location: Optional[List[int]] = None, + ) -> None: + """ + Initialize a CaptureGroup object. 
+ + :param str group: A recognized capture group for the entity. + :param List[int] location: (optional) Zero-based character offsets that + indicate where the entity value begins and ends in the input text. + """ + self.group = group + self.location = location + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CaptureGroup': + """Initialize a CaptureGroup object from a json dictionary.""" + args = {} + if (group := _dict.get('group')) is not None: + args['group'] = group + else: + raise ValueError( + 'Required property \'group\' not present in CaptureGroup JSON') + if (location := _dict.get('location')) is not None: + args['location'] = location + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CaptureGroup object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'group') and self.group is not None: + _dict['group'] = self.group + if hasattr(self, 'location') and self.location is not None: + _dict['location'] = self.location + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CaptureGroup object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CaptureGroup') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CaptureGroup') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ChannelTransferInfo: + """ + Information used by an integration to transfer the conversation to a different + channel. + + :param ChannelTransferTarget target: An object specifying target channels + available for the transfer. Each property of this object represents an available + transfer target. Currently, the only supported property is **chat**, + representing the web chat integration. + """ + + def __init__( + self, + target: 'ChannelTransferTarget', + ) -> None: + """ + Initialize a ChannelTransferInfo object. + + :param ChannelTransferTarget target: An object specifying target channels + available for the transfer. Each property of this object represents an + available transfer target. Currently, the only supported property is + **chat**, representing the web chat integration. 
+ """ + self.target = target + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ChannelTransferInfo': + """Initialize a ChannelTransferInfo object from a json dictionary.""" + args = {} + if (target := _dict.get('target')) is not None: + args['target'] = ChannelTransferTarget.from_dict(target) + else: + raise ValueError( + 'Required property \'target\' not present in ChannelTransferInfo JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ChannelTransferInfo object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'target') and self.target is not None: + if isinstance(self.target, dict): + _dict['target'] = self.target + else: + _dict['target'] = self.target.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ChannelTransferInfo object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ChannelTransferInfo') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ChannelTransferInfo') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ChannelTransferTarget: + """ + An object specifying target channels available for the transfer. Each property of this + object represents an available transfer target. Currently, the only supported property + is **chat**, representing the web chat integration. + + :param ChannelTransferTargetChat chat: (optional) Information for transferring + to the web chat integration. + """ + + def __init__( + self, + *, + chat: Optional['ChannelTransferTargetChat'] = None, + ) -> None: + """ + Initialize a ChannelTransferTarget object. + + :param ChannelTransferTargetChat chat: (optional) Information for + transferring to the web chat integration. 
+ """ + self.chat = chat + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ChannelTransferTarget': + """Initialize a ChannelTransferTarget object from a json dictionary.""" + args = {} + if (chat := _dict.get('chat')) is not None: + args['chat'] = ChannelTransferTargetChat.from_dict(chat) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ChannelTransferTarget object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'chat') and self.chat is not None: + if isinstance(self.chat, dict): + _dict['chat'] = self.chat + else: + _dict['chat'] = self.chat.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ChannelTransferTarget object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ChannelTransferTarget') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ChannelTransferTarget') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ChannelTransferTargetChat: + """ + Information for transferring to the web chat integration. + + :param str url: (optional) The URL of the target web chat. + """ + + def __init__( + self, + *, + url: Optional[str] = None, + ) -> None: + """ + Initialize a ChannelTransferTargetChat object. + + :param str url: (optional) The URL of the target web chat. + """ + self.url = url + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ChannelTransferTargetChat': + """Initialize a ChannelTransferTargetChat object from a json dictionary.""" + args = {} + if (url := _dict.get('url')) is not None: + args['url'] = url + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ChannelTransferTargetChat object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ChannelTransferTargetChat object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ChannelTransferTargetChat') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ChannelTransferTargetChat') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ClientAction: + """ + ClientAction. + + :param str name: (optional) The name of the client action. + :param str result_variable: (optional) The name of the variable that the results + are stored in. + :param str type: (optional) The type of turn event. + :param str skill: (optional) The skill that is requesting the action. Included + only if **type**=`client`. + :param dict parameters: (optional) An object containing arbitrary variables that + are included in the turn event. 
+ """ + + def __init__( + self, + *, + name: Optional[str] = None, + result_variable: Optional[str] = None, + type: Optional[str] = None, + skill: Optional[str] = None, + parameters: Optional[dict] = None, + ) -> None: + """ + Initialize a ClientAction object. + + :param str name: (optional) The name of the client action. + :param str result_variable: (optional) The name of the variable that the + results are stored in. + :param str type: (optional) The type of turn event. + :param str skill: (optional) The skill that is requesting the action. + Included only if **type**=`client`. + :param dict parameters: (optional) An object containing arbitrary variables + that are included in the turn event. + """ + self.name = name + self.result_variable = result_variable + self.type = type + self.skill = skill + self.parameters = parameters + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ClientAction': + """Initialize a ClientAction object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + if (result_variable := _dict.get('result_variable')) is not None: + args['result_variable'] = result_variable + if (type := _dict.get('type')) is not None: + args['type'] = type + if (skill := _dict.get('skill')) is not None: + args['skill'] = skill + if (parameters := _dict.get('parameters')) is not None: + args['parameters'] = parameters + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ClientAction object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, + 'result_variable') and self.result_variable is not None: + _dict['result_variable'] = self.result_variable + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'skill') and self.skill is not None: + _dict['skill'] = self.skill + if hasattr(self, 'parameters') and self.parameters is not None: + _dict['parameters'] = self.parameters + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ClientAction object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ClientAction') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ClientAction') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class SkillEnum(str, Enum): + """ + The skill that is requesting the action. Included only if **type**=`client`. + """ + + MAIN_SKILL = 'main skill' + ACTIONS_SKILL = 'actions skill' + + +class CreateAssistantReleaseImportResponse: + """ + CreateAssistantReleaseImportResponse. + + :param str status: (optional) The current status of the artifact import process: + - **Failed**: The asynchronous artifact import process has failed. + - **Processing**: An asynchronous operation to import artifact is underway and + not yet completed. + :param str task_id: (optional) A unique identifier for a background asynchronous + task that is executing or has executed the operation. + :param str assistant_id: (optional) The ID of the assistant to which the release + belongs. 
+ :param List[str] skill_impact_in_draft: (optional) An array of skill types in + the draft environment which will be overridden with skills from the artifact + being imported. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + """ + + def __init__( + self, + *, + status: Optional[str] = None, + task_id: Optional[str] = None, + assistant_id: Optional[str] = None, + skill_impact_in_draft: Optional[List[str]] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: + """ + Initialize a CreateAssistantReleaseImportResponse object. + + :param List[str] skill_impact_in_draft: (optional) An array of skill types + in the draft environment which will be overridden with skills from the + artifact being imported. + """ + self.status = status + self.task_id = task_id + self.assistant_id = assistant_id + self.skill_impact_in_draft = skill_impact_in_draft + self.created = created + self.updated = updated + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CreateAssistantReleaseImportResponse': + """Initialize a CreateAssistantReleaseImportResponse object from a json dictionary.""" + args = {} + if (status := _dict.get('status')) is not None: + args['status'] = status + if (task_id := _dict.get('task_id')) is not None: + args['task_id'] = task_id + if (assistant_id := _dict.get('assistant_id')) is not None: + args['assistant_id'] = assistant_id + if (skill_impact_in_draft := + _dict.get('skill_impact_in_draft')) is not None: + args['skill_impact_in_draft'] = skill_impact_in_draft + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CreateAssistantReleaseImportResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'status') and getattr(self, 'status') is not None: + _dict['status'] = getattr(self, 'status') + if hasattr(self, 'task_id') and getattr(self, 'task_id') is not None: + _dict['task_id'] = getattr(self, 'task_id') + if hasattr(self, 'assistant_id') and getattr( + self, 'assistant_id') is not None: + _dict['assistant_id'] = getattr(self, 'assistant_id') + if hasattr(self, 'skill_impact_in_draft' + ) and self.skill_impact_in_draft is not None: + _dict['skill_impact_in_draft'] = self.skill_impact_in_draft + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CreateAssistantReleaseImportResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CreateAssistantReleaseImportResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CreateAssistantReleaseImportResponse') -> 
bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The current status of the artifact import process: + - **Failed**: The asynchronous artifact import process has failed. + - **Processing**: An asynchronous operation to import artifact is underway and + not yet completed. + """ + + FAILED = 'Failed' + PROCESSING = 'Processing' + + class SkillImpactInDraftEnum(str, Enum): + """ + The type of the skill in the draft environment. + """ + + ACTION = 'action' + DIALOG = 'dialog' + + +class CreateReleaseExportWithStatusErrors: + """ + CreateReleaseExportWithStatusErrors. + + :param str status: (optional) The current status of the release export creation + process: + - **Available**: The release export package is available for download. + - **Failed**: The asynchronous release export package creation process has + failed. + - **Processing**: An asynchronous operation to create the release export + package is underway and not yet completed. + :param str task_id: (optional) A unique identifier for a background asynchronous + task that is executing or has executed the operation. + :param str assistant_id: (optional) The ID of the assistant to which the release + belongs. + :param str release: (optional) The name of the release. The name is the version + number (an integer), returned as a string. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + :param List[StatusError] status_errors: (optional) An array of messages about + errors that caused an asynchronous operation to fail. Included only if + **status**=`Failed`. + :param str status_description: (optional) The description of the failed + asynchronous operation. Included only if **status**=`Failed`. + """ + + def __init__( + self, + *, + status: Optional[str] = None, + task_id: Optional[str] = None, + assistant_id: Optional[str] = None, + release: Optional[str] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + status_errors: Optional[List['StatusError']] = None, + status_description: Optional[str] = None, + ) -> None: + """ + Initialize a CreateReleaseExportWithStatusErrors object. 
+ + """ + self.status = status + self.task_id = task_id + self.assistant_id = assistant_id + self.release = release + self.created = created + self.updated = updated + self.status_errors = status_errors + self.status_description = status_description + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CreateReleaseExportWithStatusErrors': + """Initialize a CreateReleaseExportWithStatusErrors object from a json dictionary.""" + args = {} + if (status := _dict.get('status')) is not None: + args['status'] = status + if (task_id := _dict.get('task_id')) is not None: + args['task_id'] = task_id + if (assistant_id := _dict.get('assistant_id')) is not None: + args['assistant_id'] = assistant_id + if (release := _dict.get('release')) is not None: + args['release'] = release + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + if (status_errors := _dict.get('status_errors')) is not None: + args['status_errors'] = [ + StatusError.from_dict(v) for v in status_errors + ] + if (status_description := _dict.get('status_description')) is not None: + args['status_description'] = status_description + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CreateReleaseExportWithStatusErrors object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'status') and getattr(self, 'status') is not None: + _dict['status'] = getattr(self, 'status') + if hasattr(self, 'task_id') and getattr(self, 'task_id') is not None: + _dict['task_id'] = getattr(self, 'task_id') + if hasattr(self, 'assistant_id') and getattr( + self, 'assistant_id') is not None: + _dict['assistant_id'] = getattr(self, 'assistant_id') + if hasattr(self, 'release') and getattr(self, 'release') is not None: + _dict['release'] = getattr(self, 'release') + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + if hasattr(self, 'status_errors') and getattr( + self, 'status_errors') is not None: + status_errors_list = [] + for v in getattr(self, 'status_errors'): + if isinstance(v, dict): + status_errors_list.append(v) + else: + status_errors_list.append(v.to_dict()) + _dict['status_errors'] = status_errors_list + if hasattr(self, 'status_description') and getattr( + self, 'status_description') is not None: + _dict['status_description'] = getattr(self, 'status_description') + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CreateReleaseExportWithStatusErrors object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CreateReleaseExportWithStatusErrors') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CreateReleaseExportWithStatusErrors') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The current status of the release 
export creation process: + - **Available**: The release export package is available for download. + - **Failed**: The asynchronous release export package creation process has + failed. + - **Processing**: An asynchronous operation to create the release export package + is underway and not yet completed. + """ + + AVAILABLE = 'Available' + FAILED = 'Failed' + PROCESSING = 'Processing' + + +class DialogLogMessage: + """ + Dialog log message details. + + :param str level: The severity of the log message. + :param str message: The text of the log message. + :param str code: A code that indicates the category to which the error message + belongs. + :param LogMessageSource source: (optional) An object that identifies the dialog + element that generated the error message. + """ + + def __init__( + self, + level: str, + message: str, + code: str, + *, + source: Optional['LogMessageSource'] = None, + ) -> None: + """ + Initialize a DialogLogMessage object. + + :param str level: The severity of the log message. + :param str message: The text of the log message. + :param str code: A code that indicates the category to which the error + message belongs. + :param LogMessageSource source: (optional) An object that identifies the + dialog element that generated the error message. + """ + self.level = level + self.message = message + self.code = code + self.source = source + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogLogMessage': + """Initialize a DialogLogMessage object from a json dictionary.""" + args = {} + if (level := _dict.get('level')) is not None: + args['level'] = level + else: + raise ValueError( + 'Required property \'level\' not present in DialogLogMessage JSON' + ) + if (message := _dict.get('message')) is not None: + args['message'] = message + else: + raise ValueError( + 'Required property \'message\' not present in DialogLogMessage JSON' + ) + if (code := _dict.get('code')) is not None: + args['code'] = code + else: + raise ValueError( + 'Required property \'code\' not present in DialogLogMessage JSON' + ) + if (source := _dict.get('source')) is not None: + args['source'] = LogMessageSource.from_dict(source) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogLogMessage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'level') and self.level is not None: + _dict['level'] = self.level + if hasattr(self, 'message') and self.message is not None: + _dict['message'] = self.message + if hasattr(self, 'code') and self.code is not None: + _dict['code'] = self.code + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogLogMessage object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogLogMessage') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogLogMessage') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class LevelEnum(str, Enum): + """ 
+ The severity of the log message. + """ + + INFO = 'info' + ERROR = 'error' + WARN = 'warn' + + +class DialogNodeAction: + """ + DialogNodeAction. + + :param str name: The name of the action. + :param str type: (optional) The type of action to invoke. + :param dict parameters: (optional) A map of key/value pairs to be provided to + the action. + :param str result_variable: The location in the dialog context where the result + of the action is stored. + :param str credentials: (optional) The name of the context variable that the + client application will use to pass in credentials for the action. + """ + + def __init__( + self, + name: str, + result_variable: str, + *, + type: Optional[str] = None, + parameters: Optional[dict] = None, + credentials: Optional[str] = None, + ) -> None: + """ + Initialize a DialogNodeAction object. + + :param str name: The name of the action. + :param str result_variable: The location in the dialog context where the + result of the action is stored. + :param str type: (optional) The type of action to invoke. + :param dict parameters: (optional) A map of key/value pairs to be provided + to the action. + :param str credentials: (optional) The name of the context variable that + the client application will use to pass in credentials for the action. + """ + self.name = name + self.type = type + self.parameters = parameters + self.result_variable = result_variable + self.credentials = credentials + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeAction': + """Initialize a DialogNodeAction object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in DialogNodeAction JSON' + ) + if (type := _dict.get('type')) is not None: + args['type'] = type + if (parameters := _dict.get('parameters')) is not None: + args['parameters'] = parameters + if (result_variable := _dict.get('result_variable')) is not None: + args['result_variable'] = result_variable + else: + raise ValueError( + 'Required property \'result_variable\' not present in DialogNodeAction JSON' + ) + if (credentials := _dict.get('credentials')) is not None: + args['credentials'] = credentials + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeAction object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'parameters') and self.parameters is not None: + _dict['parameters'] = self.parameters + if hasattr(self, + 'result_variable') and self.result_variable is not None: + _dict['result_variable'] = self.result_variable + if hasattr(self, 'credentials') and self.credentials is not None: + _dict['credentials'] = self.credentials + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeAction object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeAction') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 
'DialogNodeAction') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of action to invoke. + """ + + CLIENT = 'client' + SERVER = 'server' + WEB_ACTION = 'web-action' + CLOUD_FUNCTION = 'cloud-function' + + +class DialogNodeOutputConnectToAgentTransferInfo: + """ + Routing or other contextual information to be used by target service desk systems. + + :param dict target: (optional) + """ + + def __init__( + self, + *, + target: Optional[dict] = None, + ) -> None: + """ + Initialize a DialogNodeOutputConnectToAgentTransferInfo object. + + :param dict target: (optional) + """ + self.target = target + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'DialogNodeOutputConnectToAgentTransferInfo': + """Initialize a DialogNodeOutputConnectToAgentTransferInfo object from a json dictionary.""" + args = {} + if (target := _dict.get('target')) is not None: + args['target'] = target + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputConnectToAgentTransferInfo object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'target') and self.target is not None: + _dict['target'] = self.target + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputConnectToAgentTransferInfo object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'DialogNodeOutputConnectToAgentTransferInfo') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'DialogNodeOutputConnectToAgentTransferInfo') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogNodeOutputOptionsElement: + """ + DialogNodeOutputOptionsElement. + + :param str label: The user-facing label for the option. + :param DialogNodeOutputOptionsElementValue value: An object defining the message + input to be sent to the assistant if the user selects the corresponding option. + """ + + def __init__( + self, + label: str, + value: 'DialogNodeOutputOptionsElementValue', + ) -> None: + """ + Initialize a DialogNodeOutputOptionsElement object. + + :param str label: The user-facing label for the option. + :param DialogNodeOutputOptionsElementValue value: An object defining the + message input to be sent to the assistant if the user selects the + corresponding option. 
+ """ + self.label = label + self.value = value + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeOutputOptionsElement': + """Initialize a DialogNodeOutputOptionsElement object from a json dictionary.""" + args = {} + if (label := _dict.get('label')) is not None: + args['label'] = label + else: + raise ValueError( + 'Required property \'label\' not present in DialogNodeOutputOptionsElement JSON' + ) + if (value := _dict.get('value')) is not None: + args['value'] = DialogNodeOutputOptionsElementValue.from_dict(value) + else: + raise ValueError( + 'Required property \'value\' not present in DialogNodeOutputOptionsElement JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputOptionsElement object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'label') and self.label is not None: + _dict['label'] = self.label + if hasattr(self, 'value') and self.value is not None: + if isinstance(self.value, dict): + _dict['value'] = self.value + else: + _dict['value'] = self.value.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputOptionsElement object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeOutputOptionsElement') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeOutputOptionsElement') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogNodeOutputOptionsElementValue: + """ + An object defining the message input to be sent to the assistant if the user selects + the corresponding option. + + :param MessageInput input: (optional) An input object that includes the input + text. + """ + + def __init__( + self, + *, + input: Optional['MessageInput'] = None, + ) -> None: + """ + Initialize a DialogNodeOutputOptionsElementValue object. + + :param MessageInput input: (optional) An input object that includes the + input text. 
+ """ + self.input = input + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeOutputOptionsElementValue': + """Initialize a DialogNodeOutputOptionsElementValue object from a json dictionary.""" + args = {} + if (input := _dict.get('input')) is not None: + args['input'] = MessageInput.from_dict(input) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeOutputOptionsElementValue object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'input') and self.input is not None: + if isinstance(self.input, dict): + _dict['input'] = self.input + else: + _dict['input'] = self.input.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeOutputOptionsElementValue object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeOutputOptionsElementValue') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeOutputOptionsElementValue') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogNodeVisited: + """ + An objects containing detailed diagnostic information about a dialog node that was + visited during processing of the input message. + + :param str dialog_node: (optional) A dialog node that was visited during + processing of the input message. + :param str title: (optional) The title of the dialog node. + :param str conditions: (optional) The conditions that trigger the dialog node. + """ + + def __init__( + self, + *, + dialog_node: Optional[str] = None, + title: Optional[str] = None, + conditions: Optional[str] = None, + ) -> None: + """ + Initialize a DialogNodeVisited object. + + :param str dialog_node: (optional) A dialog node that was visited during + processing of the input message. + :param str title: (optional) The title of the dialog node. + :param str conditions: (optional) The conditions that trigger the dialog + node. 
+ """ + self.dialog_node = dialog_node + self.title = title + self.conditions = conditions + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogNodeVisited': + """Initialize a DialogNodeVisited object from a json dictionary.""" + args = {} + if (dialog_node := _dict.get('dialog_node')) is not None: + args['dialog_node'] = dialog_node + if (title := _dict.get('title')) is not None: + args['title'] = title + if (conditions := _dict.get('conditions')) is not None: + args['conditions'] = conditions + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeVisited object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'dialog_node') and self.dialog_node is not None: + _dict['dialog_node'] = self.dialog_node + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'conditions') and self.conditions is not None: + _dict['conditions'] = self.conditions + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogNodeVisited object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogNodeVisited') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogNodeVisited') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogSuggestion: + """ + DialogSuggestion. + + :param str label: The user-facing label for the suggestion. This label is taken + from the **title** or **user_label** property of the corresponding dialog node, + depending on the disambiguation options. + :param DialogSuggestionValue value: An object defining the message input to be + sent to the assistant if the user selects the corresponding disambiguation + option. + **Note:** This entire message input object must be included in the request body + of the next message sent to the assistant. Do not modify or remove any of the + included properties. + :param dict output: (optional) The dialog output that will be returned from the + watsonx Assistant service if the user selects the corresponding option. + """ + + def __init__( + self, + label: str, + value: 'DialogSuggestionValue', + *, + output: Optional[dict] = None, + ) -> None: + """ + Initialize a DialogSuggestion object. + + :param str label: The user-facing label for the suggestion. This label is + taken from the **title** or **user_label** property of the corresponding + dialog node, depending on the disambiguation options. + :param DialogSuggestionValue value: An object defining the message input to + be sent to the assistant if the user selects the corresponding + disambiguation option. + **Note:** This entire message input object must be included in the request + body of the next message sent to the assistant. Do not modify or remove any + of the included properties. + :param dict output: (optional) The dialog output that will be returned from + the watsonx Assistant service if the user selects the corresponding option. 
+ """ + self.label = label + self.value = value + self.output = output + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogSuggestion': + """Initialize a DialogSuggestion object from a json dictionary.""" + args = {} + if (label := _dict.get('label')) is not None: + args['label'] = label + else: + raise ValueError( + 'Required property \'label\' not present in DialogSuggestion JSON' + ) + if (value := _dict.get('value')) is not None: + args['value'] = DialogSuggestionValue.from_dict(value) + else: + raise ValueError( + 'Required property \'value\' not present in DialogSuggestion JSON' + ) + if (output := _dict.get('output')) is not None: + args['output'] = output + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogSuggestion object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'label') and self.label is not None: + _dict['label'] = self.label + if hasattr(self, 'value') and self.value is not None: + if isinstance(self.value, dict): + _dict['value'] = self.value + else: + _dict['value'] = self.value.to_dict() + if hasattr(self, 'output') and self.output is not None: + _dict['output'] = self.output + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogSuggestion object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogSuggestion') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogSuggestion') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DialogSuggestionValue: + """ + An object defining the message input to be sent to the assistant if the user selects + the corresponding disambiguation option. + **Note:** This entire message input object must be included in the request body of + the next message sent to the assistant. Do not modify or remove any of the included + properties. + + :param MessageInput input: (optional) An input object that includes the input + text. + """ + + def __init__( + self, + *, + input: Optional['MessageInput'] = None, + ) -> None: + """ + Initialize a DialogSuggestionValue object. + + :param MessageInput input: (optional) An input object that includes the + input text. 
+ """ + self.input = input + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DialogSuggestionValue': + """Initialize a DialogSuggestionValue object from a json dictionary.""" + args = {} + if (input := _dict.get('input')) is not None: + args['input'] = MessageInput.from_dict(input) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogSuggestionValue object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'input') and self.input is not None: + if isinstance(self.input, dict): + _dict['input'] = self.input + else: + _dict['input'] = self.input.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DialogSuggestionValue object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DialogSuggestionValue') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DialogSuggestionValue') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DtmfCommandInfo: + """ + DtmfCommandInfo. + + :param str type: Specifies the type of DTMF command for the phone integration. + :param dict parameters: (optional) Parameters specified by the command type. + """ + + def __init__( + self, + type: str, + *, + parameters: Optional[dict] = None, + ) -> None: + """ + Initialize a DtmfCommandInfo object. + + :param str type: Specifies the type of DTMF command for the phone + integration. + :param dict parameters: (optional) Parameters specified by the command + type. 
+ """ + self.type = type + self.parameters = parameters + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DtmfCommandInfo': + """Initialize a DtmfCommandInfo object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + else: + raise ValueError( + 'Required property \'type\' not present in DtmfCommandInfo JSON' + ) + if (parameters := _dict.get('parameters')) is not None: + args['parameters'] = parameters + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DtmfCommandInfo object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'parameters') and self.parameters is not None: + _dict['parameters'] = self.parameters + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DtmfCommandInfo object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DtmfCommandInfo') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DtmfCommandInfo') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + Specifies the type of DTMF command for the phone integration. + """ + + COLLECT = 'collect' + DISABLE_BARGE_IN = 'disable_barge_in' + ENABLE_BARGE_IN = 'enable_barge_in' + SEND = 'send' + + +class Environment: + """ + Environment. + + :param str name: (optional) The name of the environment. + :param str description: (optional) The description of the environment. + :param str assistant_id: (optional) The assistant ID of the assistant the + environment is associated with. + :param str environment_id: (optional) The environment ID of the environment. + :param str environment: (optional) The type of the environment. All environments + other than the `draft` and `live` environments have the type `staging`. + :param BaseEnvironmentReleaseReference release_reference: (optional) An object + describing the release that is currently deployed in the environment. + :param BaseEnvironmentOrchestration orchestration: The search skill + orchestration settings for the environment. + :param int session_timeout: The session inactivity timeout setting for the + environment (in seconds). + :param List[IntegrationReference] integration_references: (optional) An array of + objects describing the integrations that exist in the environment. + :param List[EnvironmentSkill] skill_references: An array of objects identifying + the skills (such as action and dialog) that exist in the environment. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. 
+ """ + + def __init__( + self, + orchestration: 'BaseEnvironmentOrchestration', + session_timeout: int, + skill_references: List['EnvironmentSkill'], + *, + name: Optional[str] = None, + description: Optional[str] = None, + assistant_id: Optional[str] = None, + environment_id: Optional[str] = None, + environment: Optional[str] = None, + release_reference: Optional['BaseEnvironmentReleaseReference'] = None, + integration_references: Optional[List['IntegrationReference']] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: + """ + Initialize a Environment object. + + :param BaseEnvironmentOrchestration orchestration: The search skill + orchestration settings for the environment. + :param int session_timeout: The session inactivity timeout setting for the + environment (in seconds). + :param List[EnvironmentSkill] skill_references: An array of objects + identifying the skills (such as action and dialog) that exist in the + environment. + :param str name: (optional) The name of the environment. + :param str description: (optional) The description of the environment. + """ + self.name = name + self.description = description + self.assistant_id = assistant_id + self.environment_id = environment_id + self.environment = environment + self.release_reference = release_reference + self.orchestration = orchestration + self.session_timeout = session_timeout + self.integration_references = integration_references + self.skill_references = skill_references + self.created = created + self.updated = updated + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Environment': + """Initialize a Environment object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + if (description := _dict.get('description')) is not None: + args['description'] = description + if (assistant_id := _dict.get('assistant_id')) is not None: + args['assistant_id'] = assistant_id + if (environment_id := _dict.get('environment_id')) is not None: + args['environment_id'] = environment_id + if (environment := _dict.get('environment')) is not None: + args['environment'] = environment + if (release_reference := _dict.get('release_reference')) is not None: + args[ + 'release_reference'] = BaseEnvironmentReleaseReference.from_dict( + release_reference) + if (orchestration := _dict.get('orchestration')) is not None: + args['orchestration'] = BaseEnvironmentOrchestration.from_dict( + orchestration) + else: + raise ValueError( + 'Required property \'orchestration\' not present in Environment JSON' + ) + if (session_timeout := _dict.get('session_timeout')) is not None: + args['session_timeout'] = session_timeout + else: + raise ValueError( + 'Required property \'session_timeout\' not present in Environment JSON' + ) + if (integration_references := + _dict.get('integration_references')) is not None: + args['integration_references'] = [ + IntegrationReference.from_dict(v) + for v in integration_references + ] + if (skill_references := _dict.get('skill_references')) is not None: + args['skill_references'] = [ + EnvironmentSkill.from_dict(v) for v in skill_references + ] + else: + raise ValueError( + 'Required property \'skill_references\' not present in Environment JSON' + ) + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + 
"""Initialize a Environment object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'assistant_id') and getattr( + self, 'assistant_id') is not None: + _dict['assistant_id'] = getattr(self, 'assistant_id') + if hasattr(self, 'environment_id') and getattr( + self, 'environment_id') is not None: + _dict['environment_id'] = getattr(self, 'environment_id') + if hasattr(self, 'environment') and getattr(self, + 'environment') is not None: + _dict['environment'] = getattr(self, 'environment') + if hasattr(self, 'release_reference') and getattr( + self, 'release_reference') is not None: + if isinstance(getattr(self, 'release_reference'), dict): + _dict['release_reference'] = getattr(self, 'release_reference') + else: + _dict['release_reference'] = getattr( + self, 'release_reference').to_dict() + if hasattr(self, 'orchestration') and self.orchestration is not None: + if isinstance(self.orchestration, dict): + _dict['orchestration'] = self.orchestration + else: + _dict['orchestration'] = self.orchestration.to_dict() + if hasattr(self, + 'session_timeout') and self.session_timeout is not None: + _dict['session_timeout'] = self.session_timeout + if hasattr(self, 'integration_references') and getattr( + self, 'integration_references') is not None: + integration_references_list = [] + for v in getattr(self, 'integration_references'): + if isinstance(v, dict): + integration_references_list.append(v) + else: + integration_references_list.append(v.to_dict()) + _dict['integration_references'] = integration_references_list + if hasattr(self, + 'skill_references') and self.skill_references is not None: + skill_references_list = [] + for v in self.skill_references: + if isinstance(v, dict): + skill_references_list.append(v) + else: + skill_references_list.append(v.to_dict()) + _dict['skill_references'] = skill_references_list + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Environment object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Environment') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Environment') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class EnvironmentCollection: + """ + EnvironmentCollection. + + :param List[Environment] environments: An array of objects describing the + environments associated with an assistant. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). 
+ """ + + def __init__( + self, + environments: List['Environment'], + pagination: 'Pagination', + ) -> None: + """ + Initialize a EnvironmentCollection object. + + :param List[Environment] environments: An array of objects describing the + environments associated with an assistant. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). + """ + self.environments = environments + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'EnvironmentCollection': + """Initialize a EnvironmentCollection object from a json dictionary.""" + args = {} + if (environments := _dict.get('environments')) is not None: + args['environments'] = [ + Environment.from_dict(v) for v in environments + ] + else: + raise ValueError( + 'Required property \'environments\' not present in EnvironmentCollection JSON' + ) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in EnvironmentCollection JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a EnvironmentCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'environments') and self.environments is not None: + environments_list = [] + for v in self.environments: + if isinstance(v, dict): + environments_list.append(v) + else: + environments_list.append(v.to_dict()) + _dict['environments'] = environments_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this EnvironmentCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'EnvironmentCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'EnvironmentCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class EnvironmentReference: + """ + EnvironmentReference. + + :param str name: (optional) The name of the environment. + :param str environment_id: (optional) The unique identifier of the environment. + :param str environment: (optional) The type of the environment. All environments + other than the draft and live environments have the type `staging`. + """ + + def __init__( + self, + *, + name: Optional[str] = None, + environment_id: Optional[str] = None, + environment: Optional[str] = None, + ) -> None: + """ + Initialize a EnvironmentReference object. + + :param str name: (optional) The name of the environment. 
+ """ + self.name = name + self.environment_id = environment_id + self.environment = environment + + @classmethod + def from_dict(cls, _dict: Dict) -> 'EnvironmentReference': + """Initialize a EnvironmentReference object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + if (environment_id := _dict.get('environment_id')) is not None: + args['environment_id'] = environment_id + if (environment := _dict.get('environment')) is not None: + args['environment'] = environment + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a EnvironmentReference object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'environment_id') and getattr( + self, 'environment_id') is not None: + _dict['environment_id'] = getattr(self, 'environment_id') + if hasattr(self, 'environment') and getattr(self, + 'environment') is not None: + _dict['environment'] = getattr(self, 'environment') + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this EnvironmentReference object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'EnvironmentReference') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'EnvironmentReference') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class EnvironmentEnum(str, Enum): + """ + The type of the environment. All environments other than the draft and live + environments have the type `staging`. + """ + + DRAFT = 'draft' + LIVE = 'live' + STAGING = 'staging' + + +class EnvironmentSkill: + """ + EnvironmentSkill. + + :param str skill_id: The skill ID of the skill. + :param str type: (optional) The type of the skill. + :param bool disabled: (optional) Whether the skill is disabled. A disabled skill + in the draft environment does not handle any messages at run time, and it is not + included in saved releases. + :param str snapshot: (optional) The name of the skill snapshot that is deployed + to the environment (for example, `draft` or `1`). + :param str skill_reference: (optional) The type of skill identified by the skill + reference. The possible values are `main skill` (for a dialog skill), `actions + skill`, and `search skill`. + """ + + def __init__( + self, + skill_id: str, + *, + type: Optional[str] = None, + disabled: Optional[bool] = None, + snapshot: Optional[str] = None, + skill_reference: Optional[str] = None, + ) -> None: + """ + Initialize a EnvironmentSkill object. + + :param str skill_id: The skill ID of the skill. + :param str type: (optional) The type of the skill. + :param bool disabled: (optional) Whether the skill is disabled. A disabled + skill in the draft environment does not handle any messages at run time, + and it is not included in saved releases. + :param str snapshot: (optional) The name of the skill snapshot that is + deployed to the environment (for example, `draft` or `1`). + :param str skill_reference: (optional) The type of skill identified by the + skill reference. 
The possible values are `main skill` (for a dialog skill), + `actions skill`, and `search skill`. + """ + self.skill_id = skill_id + self.type = type + self.disabled = disabled + self.snapshot = snapshot + self.skill_reference = skill_reference + + @classmethod + def from_dict(cls, _dict: Dict) -> 'EnvironmentSkill': + """Initialize a EnvironmentSkill object from a json dictionary.""" + args = {} + if (skill_id := _dict.get('skill_id')) is not None: + args['skill_id'] = skill_id + else: + raise ValueError( + 'Required property \'skill_id\' not present in EnvironmentSkill JSON' + ) + if (type := _dict.get('type')) is not None: + args['type'] = type + if (disabled := _dict.get('disabled')) is not None: + args['disabled'] = disabled + if (snapshot := _dict.get('snapshot')) is not None: + args['snapshot'] = snapshot + if (skill_reference := _dict.get('skill_reference')) is not None: + args['skill_reference'] = skill_reference + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a EnvironmentSkill object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'skill_id') and self.skill_id is not None: + _dict['skill_id'] = self.skill_id + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'disabled') and self.disabled is not None: + _dict['disabled'] = self.disabled + if hasattr(self, 'snapshot') and self.snapshot is not None: + _dict['snapshot'] = self.snapshot + if hasattr(self, + 'skill_reference') and self.skill_reference is not None: + _dict['skill_reference'] = self.skill_reference + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this EnvironmentSkill object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'EnvironmentSkill') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'EnvironmentSkill') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of the skill. + """ + + DIALOG = 'dialog' + ACTION = 'action' + SEARCH = 'search' + + +class FinalResponse: + """ + Message final response content. + + :param FinalResponseOutput output: (optional) Assistant output to be rendered or + processed by the client. + :param MessageContext context: (optional) Context data for the conversation. You + can use this property to access context variables. The context is stored by the + assistant on a per-session basis. + **Note:** The context is included in message responses only if + **return_context**=`true` in the message request. Full context is always + included in logs. + :param str user_id: (optional) A string value that identifies the user who is + interacting with the assistant. The client must provide a unique identifier for + each individual end user who accesses the application. For user-based plans, + this user ID is used to identify unique users for billing purposes. This string + cannot contain carriage return, newline, or tab characters. If no value is + specified in the input, **user_id** is automatically set to the value of + **context.global.session_id**. 
+ **Note:** This property is the same as the **user_id** property in the global + system context. + :param MessageOutput masked_output: (optional) Assistant output to be rendered + or processed by the client. All private data is masked or removed. + :param MessageInput masked_input: (optional) An input object that includes the + input text. All private data is masked or removed. + """ + + def __init__( + self, + *, + output: Optional['FinalResponseOutput'] = None, + context: Optional['MessageContext'] = None, + user_id: Optional[str] = None, + masked_output: Optional['MessageOutput'] = None, + masked_input: Optional['MessageInput'] = None, + ) -> None: + """ + Initialize a FinalResponse object. + + :param FinalResponseOutput output: (optional) Assistant output to be + rendered or processed by the client. + :param MessageContext context: (optional) Context data for the + conversation. You can use this property to access context variables. The + context is stored by the assistant on a per-session basis. + **Note:** The context is included in message responses only if + **return_context**=`true` in the message request. Full context is always + included in logs. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the assistant. The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. If no value is specified in the input, **user_id** is + automatically set to the value of **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the + global system context. + :param MessageOutput masked_output: (optional) Assistant output to be + rendered or processed by the client. All private data is masked or removed. + :param MessageInput masked_input: (optional) An input object that includes + the input text. All private data is masked or removed. 
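+
+        Example (an illustrative sketch; the dictionary is a minimal,
+        hypothetical payload rather than a real service response):
+
+            final = FinalResponse.from_dict({'user_id': 'my-user-id'})
+            print(final.user_id)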
+ """ + self.output = output + self.context = context + self.user_id = user_id + self.masked_output = masked_output + self.masked_input = masked_input + + @classmethod + def from_dict(cls, _dict: Dict) -> 'FinalResponse': + """Initialize a FinalResponse object from a json dictionary.""" + args = {} + if (output := _dict.get('output')) is not None: + args['output'] = FinalResponseOutput.from_dict(output) + if (context := _dict.get('context')) is not None: + args['context'] = MessageContext.from_dict(context) + if (user_id := _dict.get('user_id')) is not None: + args['user_id'] = user_id + if (masked_output := _dict.get('masked_output')) is not None: + args['masked_output'] = MessageOutput.from_dict(masked_output) + if (masked_input := _dict.get('masked_input')) is not None: + args['masked_input'] = MessageInput.from_dict(masked_input) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a FinalResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'output') and self.output is not None: + if isinstance(self.output, dict): + _dict['output'] = self.output + else: + _dict['output'] = self.output.to_dict() + if hasattr(self, 'context') and self.context is not None: + if isinstance(self.context, dict): + _dict['context'] = self.context + else: + _dict['context'] = self.context.to_dict() + if hasattr(self, 'user_id') and self.user_id is not None: + _dict['user_id'] = self.user_id + if hasattr(self, 'masked_output') and self.masked_output is not None: + if isinstance(self.masked_output, dict): + _dict['masked_output'] = self.masked_output + else: + _dict['masked_output'] = self.masked_output.to_dict() + if hasattr(self, 'masked_input') and self.masked_input is not None: + if isinstance(self.masked_input, dict): + _dict['masked_input'] = self.masked_input + else: + _dict['masked_input'] = self.masked_input.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this FinalResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'FinalResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'FinalResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class FinalResponseOutput: + """ + Assistant output to be rendered or processed by the client. + + :param List[RuntimeResponseGeneric] generic: (optional) Output intended for any + channel. It is the responsibility of the client application to implement the + supported response types. + :param List[RuntimeIntent] intents: (optional) An array of intents recognized in + the user input, sorted in descending order of confidence. + :param List[RuntimeEntity] entities: (optional) An array of entities identified + in the user input. + :param List[DialogNodeAction] actions: (optional) An array of objects describing + any actions requested by the dialog node. + :param MessageOutputDebug debug: (optional) Additional detailed information + about a message response and how it was generated. + :param dict user_defined: (optional) An object containing any custom properties + included in the response. 
This object includes any arbitrary properties defined
+      in the dialog JSON editor as part of the dialog node output.
+    :param MessageOutputSpelling spelling: (optional) Properties describing any
+      spelling corrections in the user input that was received.
+    :param List[MessageOutputLLMMetadata] llm_metadata: (optional) An array of
+      objects that provide information about calls to large language models that
+      occurred as part of handling this message.
+    :param MessageStreamMetadata streaming_metadata: Contains meta-information about
+      the item(s) being streamed.
+    """
+
+    def __init__(
+        self,
+        streaming_metadata: 'MessageStreamMetadata',
+        *,
+        generic: Optional[List['RuntimeResponseGeneric']] = None,
+        intents: Optional[List['RuntimeIntent']] = None,
+        entities: Optional[List['RuntimeEntity']] = None,
+        actions: Optional[List['DialogNodeAction']] = None,
+        debug: Optional['MessageOutputDebug'] = None,
+        user_defined: Optional[dict] = None,
+        spelling: Optional['MessageOutputSpelling'] = None,
+        llm_metadata: Optional[List['MessageOutputLLMMetadata']] = None,
+    ) -> None:
+        """
+        Initialize a FinalResponseOutput object.
+
+        :param MessageStreamMetadata streaming_metadata: Contains meta-information
+          about the item(s) being streamed.
+        :param List[RuntimeResponseGeneric] generic: (optional) Output intended for
+          any channel. It is the responsibility of the client application to
+          implement the supported response types.
+        :param List[RuntimeIntent] intents: (optional) An array of intents
+          recognized in the user input, sorted in descending order of confidence.
+        :param List[RuntimeEntity] entities: (optional) An array of entities
+          identified in the user input.
+        :param List[DialogNodeAction] actions: (optional) An array of objects
+          describing any actions requested by the dialog node.
+        :param MessageOutputDebug debug: (optional) Additional detailed information
+          about a message response and how it was generated.
+        :param dict user_defined: (optional) An object containing any custom
+          properties included in the response. This object includes any arbitrary
+          properties defined in the dialog JSON editor as part of the dialog node
+          output.
+        :param MessageOutputSpelling spelling: (optional) Properties describing any
+          spelling corrections in the user input that was received.
+        :param List[MessageOutputLLMMetadata] llm_metadata: (optional) An array of
+          objects that provide information about calls to large language models that
+          occurred as part of handling this message.
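+
+        Example (illustrative; shows that `streaming_metadata` is a required
+        property when deserializing, as enforced by `from_dict`):
+
+            try:
+                FinalResponseOutput.from_dict({'user_defined': {'key': 'value'}})
+            except ValueError as err:
+                print(err)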
+ """ + self.generic = generic + self.intents = intents + self.entities = entities + self.actions = actions + self.debug = debug + self.user_defined = user_defined + self.spelling = spelling + self.llm_metadata = llm_metadata + self.streaming_metadata = streaming_metadata + + @classmethod + def from_dict(cls, _dict: Dict) -> 'FinalResponseOutput': + """Initialize a FinalResponseOutput object from a json dictionary.""" + args = {} + if (generic := _dict.get('generic')) is not None: + args['generic'] = [ + RuntimeResponseGeneric.from_dict(v) for v in generic + ] + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + if (actions := _dict.get('actions')) is not None: + args['actions'] = [DialogNodeAction.from_dict(v) for v in actions] + if (debug := _dict.get('debug')) is not None: + args['debug'] = MessageOutputDebug.from_dict(debug) + if (user_defined := _dict.get('user_defined')) is not None: + args['user_defined'] = user_defined + if (spelling := _dict.get('spelling')) is not None: + args['spelling'] = MessageOutputSpelling.from_dict(spelling) + if (llm_metadata := _dict.get('llm_metadata')) is not None: + args['llm_metadata'] = [ + MessageOutputLLMMetadata.from_dict(v) for v in llm_metadata + ] + if (streaming_metadata := _dict.get('streaming_metadata')) is not None: + args['streaming_metadata'] = MessageStreamMetadata.from_dict( + streaming_metadata) + else: + raise ValueError( + 'Required property \'streaming_metadata\' not present in FinalResponseOutput JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a FinalResponseOutput object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'generic') and self.generic is not None: + generic_list = [] + for v in self.generic: + if isinstance(v, dict): + generic_list.append(v) + else: + generic_list.append(v.to_dict()) + _dict['generic'] = generic_list + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'actions') and self.actions is not None: + actions_list = [] + for v in self.actions: + if isinstance(v, dict): + actions_list.append(v) + else: + actions_list.append(v.to_dict()) + _dict['actions'] = actions_list + if hasattr(self, 'debug') and self.debug is not None: + if isinstance(self.debug, dict): + _dict['debug'] = self.debug + else: + _dict['debug'] = self.debug.to_dict() + if hasattr(self, 'user_defined') and self.user_defined is not None: + _dict['user_defined'] = self.user_defined + if hasattr(self, 'spelling') and self.spelling is not None: + if isinstance(self.spelling, dict): + _dict['spelling'] = self.spelling + else: + _dict['spelling'] = self.spelling.to_dict() + if hasattr(self, 'llm_metadata') and self.llm_metadata is not None: + llm_metadata_list = [] + for v in self.llm_metadata: + if isinstance(v, dict): + llm_metadata_list.append(v) + else: + 
llm_metadata_list.append(v.to_dict()) + _dict['llm_metadata'] = llm_metadata_list + if hasattr( + self, + 'streaming_metadata') and self.streaming_metadata is not None: + if isinstance(self.streaming_metadata, dict): + _dict['streaming_metadata'] = self.streaming_metadata + else: + _dict['streaming_metadata'] = self.streaming_metadata.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this FinalResponseOutput object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'FinalResponseOutput') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'FinalResponseOutput') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class GenerativeAITask: + """ + GenerativeAITask. + + """ + + def __init__(self,) -> None: + """ + Initialize a GenerativeAITask object. + + """ + msg = "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format( + ", ".join([ + 'GenerativeAITaskContentGroundedAnswering', + 'GenerativeAITaskGeneralPurposeAnswering' + ])) + raise Exception(msg) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'GenerativeAITask': + """Initialize a GenerativeAITask object from a json dictionary.""" + disc_class = cls._get_class_by_discriminator(_dict) + if disc_class != cls: + return disc_class.from_dict(_dict) + msg = "Cannot convert dictionary into an instance of base class 'GenerativeAITask'. The discriminator value should map to a valid subclass: {1}".format( + ", ".join([ + 'GenerativeAITaskContentGroundedAnswering', + 'GenerativeAITaskGeneralPurposeAnswering' + ])) + raise Exception(msg) + + @classmethod + def _from_dict(cls, _dict: Dict): + """Initialize a GenerativeAITask object from a json dictionary.""" + return cls.from_dict(_dict) + + @classmethod + def _get_class_by_discriminator(cls, _dict: Dict) -> object: + mapping = {} + mapping[ + 'content_grounded_answering'] = 'GenerativeAITaskContentGroundedAnswering' + mapping[ + 'general_purpose_answering'] = 'GenerativeAITaskGeneralPurposeAnswering' + disc_value = _dict.get('task') + if disc_value is None: + raise ValueError( + 'Discriminator property \'task\' not found in GenerativeAITask JSON' + ) + class_name = mapping.get(disc_value, disc_value) + try: + disc_class = getattr(sys.modules[__name__], class_name) + except AttributeError: + disc_class = cls + if isinstance(disc_class, object): + return disc_class + raise TypeError('%s is not a discriminator class' % class_name) + + +class GenerativeAITaskConfidenceScores: + """ + The confidence scores for determining whether to show the generated response or an “I + don't know” response. + + :param float pre_gen: (optional) The confidence score based on user query and + search results. + :param float pre_gen_threshold: (optional) The pre_gen confidence score + threshold. If the pre_gen score is below this threshold, it shows an “I don't + know” response instead of the generated response. Shown in the conversational + search skill UI as the “Retrieval Confidence threshold”. + :param float post_gen: (optional) The confidence score based on user query, + search results, and the generated response. 
+ :param float post_gen_threshold: (optional) The post_gen confidence score + threshold. If the post_gen score is below this threshold, it shows an “I don't + know” response instead of the generated response. Shown in the conversational + search skill UI as the “Response Confidence threshold”. + """ + + def __init__( + self, + *, + pre_gen: Optional[float] = None, + pre_gen_threshold: Optional[float] = None, + post_gen: Optional[float] = None, + post_gen_threshold: Optional[float] = None, + ) -> None: + """ + Initialize a GenerativeAITaskConfidenceScores object. + + :param float pre_gen: (optional) The confidence score based on user query + and search results. + :param float pre_gen_threshold: (optional) The pre_gen confidence score + threshold. If the pre_gen score is below this threshold, it shows an “I + don't know” response instead of the generated response. Shown in the + conversational search skill UI as the “Retrieval Confidence threshold”. + :param float post_gen: (optional) The confidence score based on user query, + search results, and the generated response. + :param float post_gen_threshold: (optional) The post_gen confidence score + threshold. If the post_gen score is below this threshold, it shows an “I + don't know” response instead of the generated response. Shown in the + conversational search skill UI as the “Response Confidence threshold”. + """ + self.pre_gen = pre_gen + self.pre_gen_threshold = pre_gen_threshold + self.post_gen = post_gen + self.post_gen_threshold = post_gen_threshold + + @classmethod + def from_dict(cls, _dict: Dict) -> 'GenerativeAITaskConfidenceScores': + """Initialize a GenerativeAITaskConfidenceScores object from a json dictionary.""" + args = {} + if (pre_gen := _dict.get('pre_gen')) is not None: + args['pre_gen'] = pre_gen + if (pre_gen_threshold := _dict.get('pre_gen_threshold')) is not None: + args['pre_gen_threshold'] = pre_gen_threshold + if (post_gen := _dict.get('post_gen')) is not None: + args['post_gen'] = post_gen + if (post_gen_threshold := _dict.get('post_gen_threshold')) is not None: + args['post_gen_threshold'] = post_gen_threshold + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a GenerativeAITaskConfidenceScores object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'pre_gen') and self.pre_gen is not None: + _dict['pre_gen'] = self.pre_gen + if hasattr(self, + 'pre_gen_threshold') and self.pre_gen_threshold is not None: + _dict['pre_gen_threshold'] = self.pre_gen_threshold + if hasattr(self, 'post_gen') and self.post_gen is not None: + _dict['post_gen'] = self.post_gen + if hasattr( + self, + 'post_gen_threshold') and self.post_gen_threshold is not None: + _dict['post_gen_threshold'] = self.post_gen_threshold + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this GenerativeAITaskConfidenceScores object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'GenerativeAITaskConfidenceScores') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'GenerativeAITaskConfidenceScores') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + 
return not self == other + + +class IntegrationReference: + """ + IntegrationReference. + + :param str integration_id: (optional) The integration ID of the integration. + :param str type: (optional) The type of the integration. + """ + + def __init__( + self, + *, + integration_id: Optional[str] = None, + type: Optional[str] = None, + ) -> None: + """ + Initialize a IntegrationReference object. + + :param str integration_id: (optional) The integration ID of the + integration. + :param str type: (optional) The type of the integration. + """ + self.integration_id = integration_id + self.type = type + + @classmethod + def from_dict(cls, _dict: Dict) -> 'IntegrationReference': + """Initialize a IntegrationReference object from a json dictionary.""" + args = {} + if (integration_id := _dict.get('integration_id')) is not None: + args['integration_id'] = integration_id + if (type := _dict.get('type')) is not None: + args['type'] = type + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a IntegrationReference object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'integration_id') and self.integration_id is not None: + _dict['integration_id'] = self.integration_id + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this IntegrationReference object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'IntegrationReference') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'IntegrationReference') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Log: + """ + Log. + + :param str log_id: A unique identifier for the logged event. + :param LogRequest request: A message request formatted for the watsonx Assistant + service. + :param LogResponse response: A response from the watsonx Assistant service. + :param str assistant_id: Unique identifier of the assistant. + :param str session_id: The ID of the session the message was part of. + :param str skill_id: The unique identifier of the skill that responded to the + message. + :param str snapshot: The name of the snapshot (dialog skill version) that + responded to the message (for example, `draft`). + :param str request_timestamp: The timestamp for receipt of the message. + :param str response_timestamp: The timestamp for the system response to the + message. + :param str language: The language of the assistant to which the message request + was made. + :param str customer_id: (optional) The customer ID specified for the message, if + any. + """ + + def __init__( + self, + log_id: str, + request: 'LogRequest', + response: 'LogResponse', + assistant_id: str, + session_id: str, + skill_id: str, + snapshot: str, + request_timestamp: str, + response_timestamp: str, + language: str, + *, + customer_id: Optional[str] = None, + ) -> None: + """ + Initialize a Log object. + + :param str log_id: A unique identifier for the logged event. + :param LogRequest request: A message request formatted for the watsonx + Assistant service. 
+ :param LogResponse response: A response from the watsonx Assistant service. + :param str assistant_id: Unique identifier of the assistant. + :param str session_id: The ID of the session the message was part of. + :param str skill_id: The unique identifier of the skill that responded to + the message. + :param str snapshot: The name of the snapshot (dialog skill version) that + responded to the message (for example, `draft`). + :param str request_timestamp: The timestamp for receipt of the message. + :param str response_timestamp: The timestamp for the system response to the + message. + :param str language: The language of the assistant to which the message + request was made. + :param str customer_id: (optional) The customer ID specified for the + message, if any. + """ + self.log_id = log_id + self.request = request + self.response = response + self.assistant_id = assistant_id + self.session_id = session_id + self.skill_id = skill_id + self.snapshot = snapshot + self.request_timestamp = request_timestamp + self.response_timestamp = response_timestamp + self.language = language + self.customer_id = customer_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Log': + """Initialize a Log object from a json dictionary.""" + args = {} + if (log_id := _dict.get('log_id')) is not None: + args['log_id'] = log_id + else: + raise ValueError( + 'Required property \'log_id\' not present in Log JSON') + if (request := _dict.get('request')) is not None: + args['request'] = LogRequest.from_dict(request) + else: + raise ValueError( + 'Required property \'request\' not present in Log JSON') + if (response := _dict.get('response')) is not None: + args['response'] = LogResponse.from_dict(response) + else: + raise ValueError( + 'Required property \'response\' not present in Log JSON') + if (assistant_id := _dict.get('assistant_id')) is not None: + args['assistant_id'] = assistant_id + else: + raise ValueError( + 'Required property \'assistant_id\' not present in Log JSON') + if (session_id := _dict.get('session_id')) is not None: + args['session_id'] = session_id + else: + raise ValueError( + 'Required property \'session_id\' not present in Log JSON') + if (skill_id := _dict.get('skill_id')) is not None: + args['skill_id'] = skill_id + else: + raise ValueError( + 'Required property \'skill_id\' not present in Log JSON') + if (snapshot := _dict.get('snapshot')) is not None: + args['snapshot'] = snapshot + else: + raise ValueError( + 'Required property \'snapshot\' not present in Log JSON') + if (request_timestamp := _dict.get('request_timestamp')) is not None: + args['request_timestamp'] = request_timestamp + else: + raise ValueError( + 'Required property \'request_timestamp\' not present in Log JSON' + ) + if (response_timestamp := _dict.get('response_timestamp')) is not None: + args['response_timestamp'] = response_timestamp + else: + raise ValueError( + 'Required property \'response_timestamp\' not present in Log JSON' + ) + if (language := _dict.get('language')) is not None: + args['language'] = language + else: + raise ValueError( + 'Required property \'language\' not present in Log JSON') + if (customer_id := _dict.get('customer_id')) is not None: + args['customer_id'] = customer_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Log object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'log_id') and self.log_id is not None: + 
_dict['log_id'] = self.log_id + if hasattr(self, 'request') and self.request is not None: + if isinstance(self.request, dict): + _dict['request'] = self.request + else: + _dict['request'] = self.request.to_dict() + if hasattr(self, 'response') and self.response is not None: + if isinstance(self.response, dict): + _dict['response'] = self.response + else: + _dict['response'] = self.response.to_dict() + if hasattr(self, 'assistant_id') and self.assistant_id is not None: + _dict['assistant_id'] = self.assistant_id + if hasattr(self, 'session_id') and self.session_id is not None: + _dict['session_id'] = self.session_id + if hasattr(self, 'skill_id') and self.skill_id is not None: + _dict['skill_id'] = self.skill_id + if hasattr(self, 'snapshot') and self.snapshot is not None: + _dict['snapshot'] = self.snapshot + if hasattr(self, + 'request_timestamp') and self.request_timestamp is not None: + _dict['request_timestamp'] = self.request_timestamp + if hasattr( + self, + 'response_timestamp') and self.response_timestamp is not None: + _dict['response_timestamp'] = self.response_timestamp + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'customer_id') and self.customer_id is not None: + _dict['customer_id'] = self.customer_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Log object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Log') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Log') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LogCollection: + """ + LogCollection. + + :param List[Log] logs: An array of objects describing log events. + :param LogPagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). + """ + + def __init__( + self, + logs: List['Log'], + pagination: 'LogPagination', + ) -> None: + """ + Initialize a LogCollection object. + + :param List[Log] logs: An array of objects describing log events. + :param LogPagination pagination: The pagination data for the returned + objects. For more information about using pagination, see + [Pagination](#pagination). 
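+
+        Example (an illustrative sketch; `assistant` is assumed to be a
+        configured `AssistantV2` client):
+
+            result = assistant.list_logs(assistant_id='{assistant_id}').get_result()
+            log_collection = LogCollection.from_dict(result)
+            for log in log_collection.logs:
+                print(log.log_id, log.request_timestamp)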
+ """ + self.logs = logs + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogCollection': + """Initialize a LogCollection object from a json dictionary.""" + args = {} + if (logs := _dict.get('logs')) is not None: + args['logs'] = [Log.from_dict(v) for v in logs] + else: + raise ValueError( + 'Required property \'logs\' not present in LogCollection JSON') + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = LogPagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in LogCollection JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'logs') and self.logs is not None: + logs_list = [] + for v in self.logs: + if isinstance(v, dict): + logs_list.append(v) + else: + logs_list.append(v.to_dict()) + _dict['logs'] = logs_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LogMessageSource: + """ + An object that identifies the dialog element that generated the error message. + + """ + + def __init__(self,) -> None: + """ + Initialize a LogMessageSource object. + + """ + msg = "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format( + ", ".join([ + 'LogMessageSourceDialogNode', 'LogMessageSourceAction', + 'LogMessageSourceStep', 'LogMessageSourceHandler' + ])) + raise Exception(msg) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogMessageSource': + """Initialize a LogMessageSource object from a json dictionary.""" + disc_class = cls._get_class_by_discriminator(_dict) + if disc_class != cls: + return disc_class.from_dict(_dict) + msg = "Cannot convert dictionary into an instance of base class 'LogMessageSource'. 
The discriminator value should map to a valid subclass: {1}".format( + ", ".join([ + 'LogMessageSourceDialogNode', 'LogMessageSourceAction', + 'LogMessageSourceStep', 'LogMessageSourceHandler' + ])) + raise Exception(msg) + + @classmethod + def _from_dict(cls, _dict: Dict): + """Initialize a LogMessageSource object from a json dictionary.""" + return cls.from_dict(_dict) + + @classmethod + def _get_class_by_discriminator(cls, _dict: Dict) -> object: + mapping = {} + mapping['dialog_node'] = 'LogMessageSourceDialogNode' + mapping['action'] = 'LogMessageSourceAction' + mapping['step'] = 'LogMessageSourceStep' + mapping['handler'] = 'LogMessageSourceHandler' + disc_value = _dict.get('type') + if disc_value is None: + raise ValueError( + 'Discriminator property \'type\' not found in LogMessageSource JSON' + ) + class_name = mapping.get(disc_value, disc_value) + try: + disc_class = getattr(sys.modules[__name__], class_name) + except AttributeError: + disc_class = cls + if isinstance(disc_class, object): + return disc_class + raise TypeError('%s is not a discriminator class' % class_name) + + +class LogPagination: + """ + The pagination data for the returned objects. For more information about using + pagination, see [Pagination](#pagination). + + :param str next_url: (optional) The URL that will return the next page of + results, if any. + :param int matched: (optional) Reserved for future use. + :param str next_cursor: (optional) A token identifying the next page of results. + """ + + def __init__( + self, + *, + next_url: Optional[str] = None, + matched: Optional[int] = None, + next_cursor: Optional[str] = None, + ) -> None: + """ + Initialize a LogPagination object. + + :param str next_url: (optional) The URL that will return the next page of + results, if any. + :param int matched: (optional) Reserved for future use. + :param str next_cursor: (optional) A token identifying the next page of + results. 
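+
+        Example (illustrative; the cursor value is hypothetical, and passing it
+        as the `cursor` argument of a subsequent `list_logs` call is an
+        assumption about the calling code):
+
+            pagination = LogPagination.from_dict({'next_cursor': 'example-cursor'})
+            if pagination.next_cursor is not None:
+                next_page_cursor = pagination.next_cursor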
+ """ + self.next_url = next_url + self.matched = matched + self.next_cursor = next_cursor + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogPagination': + """Initialize a LogPagination object from a json dictionary.""" + args = {} + if (next_url := _dict.get('next_url')) is not None: + args['next_url'] = next_url + if (matched := _dict.get('matched')) is not None: + args['matched'] = matched + if (next_cursor := _dict.get('next_cursor')) is not None: + args['next_cursor'] = next_cursor + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogPagination object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'next_url') and self.next_url is not None: + _dict['next_url'] = self.next_url + if hasattr(self, 'matched') and self.matched is not None: + _dict['matched'] = self.matched + if hasattr(self, 'next_cursor') and self.next_cursor is not None: + _dict['next_cursor'] = self.next_cursor + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogPagination object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogPagination') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogPagination') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LogRequest: + """ + A message request formatted for the watsonx Assistant service. + + :param LogRequestInput input: (optional) An input object that includes the input + text. All private data is masked or removed. + :param MessageContext context: (optional) Context data for the conversation. You + can use this property to set or modify context variables, which can also be + accessed by dialog nodes. The context is stored by the assistant on a + per-session basis. + **Note:** The total size of the context data stored for a stateful session + cannot exceed 100KB. + :param str user_id: (optional) A string value that identifies the user who is + interacting with the assistant. The client must provide a unique identifier for + each individual end user who accesses the application. For user-based plans, + this user ID is used to identify unique users for billing purposes. This string + cannot contain carriage return, newline, or tab characters. If no value is + specified in the input, **user_id** is automatically set to the value of + **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the global + system context. If **user_id** is specified in both locations, the value + specified at the root is used. + """ + + def __init__( + self, + *, + input: Optional['LogRequestInput'] = None, + context: Optional['MessageContext'] = None, + user_id: Optional[str] = None, + ) -> None: + """ + Initialize a LogRequest object. + + :param LogRequestInput input: (optional) An input object that includes the + input text. All private data is masked or removed. + :param MessageContext context: (optional) Context data for the + conversation. You can use this property to set or modify context variables, + which can also be accessed by dialog nodes. 
The context is stored by the + assistant on a per-session basis. + **Note:** The total size of the context data stored for a stateful session + cannot exceed 100KB. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the assistant. The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. If no value is specified in the input, **user_id** is + automatically set to the value of **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the + global system context. If **user_id** is specified in both locations, the + value specified at the root is used. + """ + self.input = input + self.context = context + self.user_id = user_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogRequest': + """Initialize a LogRequest object from a json dictionary.""" + args = {} + if (input := _dict.get('input')) is not None: + args['input'] = LogRequestInput.from_dict(input) + if (context := _dict.get('context')) is not None: + args['context'] = MessageContext.from_dict(context) + if (user_id := _dict.get('user_id')) is not None: + args['user_id'] = user_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogRequest object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'input') and self.input is not None: + if isinstance(self.input, dict): + _dict['input'] = self.input + else: + _dict['input'] = self.input.to_dict() + if hasattr(self, 'context') and self.context is not None: + if isinstance(self.context, dict): + _dict['context'] = self.context + else: + _dict['context'] = self.context.to_dict() + if hasattr(self, 'user_id') and self.user_id is not None: + _dict['user_id'] = self.user_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogRequest object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogRequest') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogRequest') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LogRequestInput: + """ + An input object that includes the input text. All private data is masked or removed. + + :param str message_type: (optional) The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or action skill is + bypassed.) + **Note:** A `search` message results in an error if no search skill is + configured for the assistant. + :param str text: (optional) The text of the user input. This string cannot + contain carriage return, newline, or tab characters. + :param List[RuntimeIntent] intents: (optional) Intents to use when evaluating + the user input. Include intents from the previous response to continue using + those intents rather than trying to recognize intents in the new input. 
+ :param List[RuntimeEntity] entities: (optional) Entities to use when evaluating + the message. Include entities from the previous response to continue using those + entities rather than detecting entities in the new input. + :param str suggestion_id: (optional) For internal use only. + :param List[MessageInputAttachment] attachments: (optional) An array of + multimedia attachments to be sent with the message. Attachments are not + processed by the assistant itself, but can be sent to external services by + webhooks. + **Note:** Attachments are not supported on IBM Cloud Pak for Data. + :param RequestAnalytics analytics: (optional) An optional object containing + analytics data. Currently, this data is used only for events sent to the Segment + extension. + :param MessageInputOptions options: (optional) Optional properties that control + how the assistant responds. + """ + + def __init__( + self, + *, + message_type: Optional[str] = None, + text: Optional[str] = None, + intents: Optional[List['RuntimeIntent']] = None, + entities: Optional[List['RuntimeEntity']] = None, + suggestion_id: Optional[str] = None, + attachments: Optional[List['MessageInputAttachment']] = None, + analytics: Optional['RequestAnalytics'] = None, + options: Optional['MessageInputOptions'] = None, + ) -> None: + """ + Initialize a LogRequestInput object. + + :param str message_type: (optional) The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or action skill + is bypassed.) + **Note:** A `search` message results in an error if no search skill is + configured for the assistant. + :param str text: (optional) The text of the user input. This string cannot + contain carriage return, newline, or tab characters. + :param List[RuntimeIntent] intents: (optional) Intents to use when + evaluating the user input. Include intents from the previous response to + continue using those intents rather than trying to recognize intents in the + new input. + :param List[RuntimeEntity] entities: (optional) Entities to use when + evaluating the message. Include entities from the previous response to + continue using those entities rather than detecting entities in the new + input. + :param str suggestion_id: (optional) For internal use only. + :param List[MessageInputAttachment] attachments: (optional) An array of + multimedia attachments to be sent with the message. Attachments are not + processed by the assistant itself, but can be sent to external services by + webhooks. + **Note:** Attachments are not supported on IBM Cloud Pak for Data. + :param RequestAnalytics analytics: (optional) An optional object containing + analytics data. Currently, this data is used only for events sent to the + Segment extension. + :param MessageInputOptions options: (optional) Optional properties that + control how the assistant responds. 
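+
+        Example (an illustrative sketch; the message text and the user ID
+        'user_1' are placeholder values, not data from a real assistant):
+
+            >>> log_input = LogRequestInput(message_type='text', text='Hello')
+            >>> log_input.to_dict()
+            {'message_type': 'text', 'text': 'Hello'}
+            >>> log_request = LogRequest(input=log_input, user_id='user_1')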
+ """ + self.message_type = message_type + self.text = text + self.intents = intents + self.entities = entities + self.suggestion_id = suggestion_id + self.attachments = attachments + self.analytics = analytics + self.options = options + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogRequestInput': + """Initialize a LogRequestInput object from a json dictionary.""" + args = {} + if (message_type := _dict.get('message_type')) is not None: + args['message_type'] = message_type + if (text := _dict.get('text')) is not None: + args['text'] = text + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + if (suggestion_id := _dict.get('suggestion_id')) is not None: + args['suggestion_id'] = suggestion_id + if (attachments := _dict.get('attachments')) is not None: + args['attachments'] = [ + MessageInputAttachment.from_dict(v) for v in attachments + ] + if (analytics := _dict.get('analytics')) is not None: + args['analytics'] = RequestAnalytics.from_dict(analytics) + if (options := _dict.get('options')) is not None: + args['options'] = MessageInputOptions.from_dict(options) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogRequestInput object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'message_type') and self.message_type is not None: + _dict['message_type'] = self.message_type + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'suggestion_id') and self.suggestion_id is not None: + _dict['suggestion_id'] = self.suggestion_id + if hasattr(self, 'attachments') and self.attachments is not None: + attachments_list = [] + for v in self.attachments: + if isinstance(v, dict): + attachments_list.append(v) + else: + attachments_list.append(v.to_dict()) + _dict['attachments'] = attachments_list + if hasattr(self, 'analytics') and self.analytics is not None: + if isinstance(self.analytics, dict): + _dict['analytics'] = self.analytics + else: + _dict['analytics'] = self.analytics.to_dict() + if hasattr(self, 'options') and self.options is not None: + if isinstance(self.options, dict): + _dict['options'] = self.options + else: + _dict['options'] = self.options.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogRequestInput object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogRequestInput') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogRequestInput') -> bool: + """Return `true` when self and 
other are not equal, false otherwise.""" + return not self == other + + class MessageTypeEnum(str, Enum): + """ + The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or action skill is + bypassed.) + **Note:** A `search` message results in an error if no search skill is configured + for the assistant. + """ + + TEXT = 'text' + SEARCH = 'search' + + +class LogResponse: + """ + A response from the watsonx Assistant service. + + :param LogResponseOutput output: Assistant output to be rendered or processed by + the client. All private data is masked or removed. + :param MessageContext context: (optional) Context data for the conversation. You + can use this property to access context variables. The context is stored by the + assistant on a per-session basis. + **Note:** The context is included in message responses only if + **return_context**=`true` in the message request. Full context is always + included in logs. + :param str user_id: A string value that identifies the user who is interacting + with the assistant. The client must provide a unique identifier for each + individual end user who accesses the application. For user-based plans, this + user ID is used to identify unique users for billing purposes. This string + cannot contain carriage return, newline, or tab characters. If no value is + specified in the input, **user_id** is automatically set to the value of + **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the global + system context. + """ + + def __init__( + self, + output: 'LogResponseOutput', + user_id: str, + *, + context: Optional['MessageContext'] = None, + ) -> None: + """ + Initialize a LogResponse object. + + :param LogResponseOutput output: Assistant output to be rendered or + processed by the client. All private data is masked or removed. + :param str user_id: A string value that identifies the user who is + interacting with the assistant. The client must provide a unique identifier + for each individual end user who accesses the application. For user-based + plans, this user ID is used to identify unique users for billing purposes. + This string cannot contain carriage return, newline, or tab characters. If + no value is specified in the input, **user_id** is automatically set to the + value of **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the + global system context. + :param MessageContext context: (optional) Context data for the + conversation. You can use this property to access context variables. The + context is stored by the assistant on a per-session basis. + **Note:** The context is included in message responses only if + **return_context**=`true` in the message request. Full context is always + included in logs. 
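+
+        Example (an illustrative sketch; the user ID is a placeholder and the
+        empty LogResponseOutput stands in for real assistant output):
+
+            >>> log_response = LogResponse(output=LogResponseOutput(), user_id='user_1')
+            >>> log_response.to_dict()
+            {'output': {}, 'user_id': 'user_1'}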
+ """ + self.output = output + self.context = context + self.user_id = user_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogResponse': + """Initialize a LogResponse object from a json dictionary.""" + args = {} + if (output := _dict.get('output')) is not None: + args['output'] = LogResponseOutput.from_dict(output) + else: + raise ValueError( + 'Required property \'output\' not present in LogResponse JSON') + if (context := _dict.get('context')) is not None: + args['context'] = MessageContext.from_dict(context) + if (user_id := _dict.get('user_id')) is not None: + args['user_id'] = user_id + else: + raise ValueError( + 'Required property \'user_id\' not present in LogResponse JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'output') and self.output is not None: + if isinstance(self.output, dict): + _dict['output'] = self.output + else: + _dict['output'] = self.output.to_dict() + if hasattr(self, 'context') and self.context is not None: + if isinstance(self.context, dict): + _dict['context'] = self.context + else: + _dict['context'] = self.context.to_dict() + if hasattr(self, 'user_id') and self.user_id is not None: + _dict['user_id'] = self.user_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LogResponseOutput: + """ + Assistant output to be rendered or processed by the client. All private data is masked + or removed. + + :param List[RuntimeResponseGeneric] generic: (optional) Output intended for any + channel. It is the responsibility of the client application to implement the + supported response types. + :param List[RuntimeIntent] intents: (optional) An array of intents recognized in + the user input, sorted in descending order of confidence. + :param List[RuntimeEntity] entities: (optional) An array of entities identified + in the user input. + :param List[DialogNodeAction] actions: (optional) An array of objects describing + any actions requested by the dialog node. + :param MessageOutputDebug debug: (optional) Additional detailed information + about a message response and how it was generated. + :param dict user_defined: (optional) An object containing any custom properties + included in the response. This object includes any arbitrary properties defined + in the dialog JSON editor as part of the dialog node output. + :param MessageOutputSpelling spelling: (optional) Properties describing any + spelling corrections in the user input that was received. + :param List[MessageOutputLLMMetadata] llm_metadata: (optional) An array of + objects that provide information about calls to large language models that + occured as part of handling this message. 
+ """ + + def __init__( + self, + *, + generic: Optional[List['RuntimeResponseGeneric']] = None, + intents: Optional[List['RuntimeIntent']] = None, + entities: Optional[List['RuntimeEntity']] = None, + actions: Optional[List['DialogNodeAction']] = None, + debug: Optional['MessageOutputDebug'] = None, + user_defined: Optional[dict] = None, + spelling: Optional['MessageOutputSpelling'] = None, + llm_metadata: Optional[List['MessageOutputLLMMetadata']] = None, + ) -> None: + """ + Initialize a LogResponseOutput object. + + :param List[RuntimeResponseGeneric] generic: (optional) Output intended for + any channel. It is the responsibility of the client application to + implement the supported response types. + :param List[RuntimeIntent] intents: (optional) An array of intents + recognized in the user input, sorted in descending order of confidence. + :param List[RuntimeEntity] entities: (optional) An array of entities + identified in the user input. + :param List[DialogNodeAction] actions: (optional) An array of objects + describing any actions requested by the dialog node. + :param MessageOutputDebug debug: (optional) Additional detailed information + about a message response and how it was generated. + :param dict user_defined: (optional) An object containing any custom + properties included in the response. This object includes any arbitrary + properties defined in the dialog JSON editor as part of the dialog node + output. + :param MessageOutputSpelling spelling: (optional) Properties describing any + spelling corrections in the user input that was received. + :param List[MessageOutputLLMMetadata] llm_metadata: (optional) An array of + objects that provide information about calls to large language models that + occured as part of handling this message. 
+ """ + self.generic = generic + self.intents = intents + self.entities = entities + self.actions = actions + self.debug = debug + self.user_defined = user_defined + self.spelling = spelling + self.llm_metadata = llm_metadata + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogResponseOutput': + """Initialize a LogResponseOutput object from a json dictionary.""" + args = {} + if (generic := _dict.get('generic')) is not None: + args['generic'] = [ + RuntimeResponseGeneric.from_dict(v) for v in generic + ] + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + if (actions := _dict.get('actions')) is not None: + args['actions'] = [DialogNodeAction.from_dict(v) for v in actions] + if (debug := _dict.get('debug')) is not None: + args['debug'] = MessageOutputDebug.from_dict(debug) + if (user_defined := _dict.get('user_defined')) is not None: + args['user_defined'] = user_defined + if (spelling := _dict.get('spelling')) is not None: + args['spelling'] = MessageOutputSpelling.from_dict(spelling) + if (llm_metadata := _dict.get('llm_metadata')) is not None: + args['llm_metadata'] = [ + MessageOutputLLMMetadata.from_dict(v) for v in llm_metadata + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogResponseOutput object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'generic') and self.generic is not None: + generic_list = [] + for v in self.generic: + if isinstance(v, dict): + generic_list.append(v) + else: + generic_list.append(v.to_dict()) + _dict['generic'] = generic_list + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'actions') and self.actions is not None: + actions_list = [] + for v in self.actions: + if isinstance(v, dict): + actions_list.append(v) + else: + actions_list.append(v.to_dict()) + _dict['actions'] = actions_list + if hasattr(self, 'debug') and self.debug is not None: + if isinstance(self.debug, dict): + _dict['debug'] = self.debug + else: + _dict['debug'] = self.debug.to_dict() + if hasattr(self, 'user_defined') and self.user_defined is not None: + _dict['user_defined'] = self.user_defined + if hasattr(self, 'spelling') and self.spelling is not None: + if isinstance(self.spelling, dict): + _dict['spelling'] = self.spelling + else: + _dict['spelling'] = self.spelling.to_dict() + if hasattr(self, 'llm_metadata') and self.llm_metadata is not None: + llm_metadata_list = [] + for v in self.llm_metadata: + if isinstance(v, dict): + llm_metadata_list.append(v) + else: + llm_metadata_list.append(v.to_dict()) + _dict['llm_metadata'] = llm_metadata_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogResponseOutput object.""" + return 
json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogResponseOutput') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogResponseOutput') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageContext: + """ + MessageContext. + + :param MessageContextGlobal global_: (optional) Session context data that is + shared by all skills used by the assistant. + :param MessageContextSkills skills: (optional) Context data specific to + particular skills used by the assistant. + :param dict integrations: (optional) An object containing context data that is + specific to particular integrations. For more information, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-dialog-integrations). + """ + + def __init__( + self, + *, + global_: Optional['MessageContextGlobal'] = None, + skills: Optional['MessageContextSkills'] = None, + integrations: Optional[dict] = None, + ) -> None: + """ + Initialize a MessageContext object. + + :param MessageContextGlobal global_: (optional) Session context data that + is shared by all skills used by the assistant. + :param MessageContextSkills skills: (optional) Context data specific to + particular skills used by the assistant. + :param dict integrations: (optional) An object containing context data that + is specific to particular integrations. For more information, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-dialog-integrations). + """ + self.global_ = global_ + self.skills = skills + self.integrations = integrations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageContext': + """Initialize a MessageContext object from a json dictionary.""" + args = {} + if (global_ := _dict.get('global')) is not None: + args['global_'] = MessageContextGlobal.from_dict(global_) + if (skills := _dict.get('skills')) is not None: + args['skills'] = MessageContextSkills.from_dict(skills) + if (integrations := _dict.get('integrations')) is not None: + args['integrations'] = integrations + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageContext object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'global_') and self.global_ is not None: + if isinstance(self.global_, dict): + _dict['global'] = self.global_ + else: + _dict['global'] = self.global_.to_dict() + if hasattr(self, 'skills') and self.skills is not None: + if isinstance(self.skills, dict): + _dict['skills'] = self.skills + else: + _dict['skills'] = self.skills.to_dict() + if hasattr(self, 'integrations') and self.integrations is not None: + _dict['integrations'] = self.integrations + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageContext object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageContext') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageContext') -> bool: + """Return `true` when self and 
other are not equal, false otherwise.""" + return not self == other + + +class MessageContextActionSkill: + """ + Context variables that are used by the action skill. Private variables are persisted, + but not shown. + + :param dict user_defined: (optional) An object containing any arbitrary + variables that can be read and written by a particular skill. + :param MessageContextSkillSystem system: (optional) System context data used by + the skill. + :param dict action_variables: (optional) An object containing action variables. + Action variables can be accessed only by steps in the same action, and do not + persist after the action ends. + :param dict skill_variables: (optional) An object containing skill variables. + (In the watsonx Assistant user interface, skill variables are called _session + variables_.) Skill variables can be accessed by any action and persist for the + duration of the session. + """ + + def __init__( + self, + *, + user_defined: Optional[dict] = None, + system: Optional['MessageContextSkillSystem'] = None, + action_variables: Optional[dict] = None, + skill_variables: Optional[dict] = None, + ) -> None: + """ + Initialize a MessageContextActionSkill object. + + :param dict user_defined: (optional) An object containing any arbitrary + variables that can be read and written by a particular skill. + :param MessageContextSkillSystem system: (optional) System context data + used by the skill. + :param dict action_variables: (optional) An object containing action + variables. Action variables can be accessed only by steps in the same + action, and do not persist after the action ends. + :param dict skill_variables: (optional) An object containing skill + variables. (In the watsonx Assistant user interface, skill variables are + called _session variables_.) Skill variables can be accessed by any action + and persist for the duration of the session. 
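+
+        Example (an illustrative sketch; the skill variable name and value are
+        placeholders chosen for this docstring, not properties required by the
+        service):
+
+            >>> action_skill = MessageContextActionSkill(
+            ...     skill_variables={'customer_name': 'Watson'})
+            >>> action_skill.to_dict()
+            {'skill_variables': {'customer_name': 'Watson'}}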
+ """ + self.user_defined = user_defined + self.system = system + self.action_variables = action_variables + self.skill_variables = skill_variables + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageContextActionSkill': + """Initialize a MessageContextActionSkill object from a json dictionary.""" + args = {} + if (user_defined := _dict.get('user_defined')) is not None: + args['user_defined'] = user_defined + if (system := _dict.get('system')) is not None: + args['system'] = MessageContextSkillSystem.from_dict(system) + if (action_variables := _dict.get('action_variables')) is not None: + args['action_variables'] = action_variables + if (skill_variables := _dict.get('skill_variables')) is not None: + args['skill_variables'] = skill_variables + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageContextActionSkill object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'user_defined') and self.user_defined is not None: + _dict['user_defined'] = self.user_defined + if hasattr(self, 'system') and self.system is not None: + if isinstance(self.system, dict): + _dict['system'] = self.system + else: + _dict['system'] = self.system.to_dict() + if hasattr(self, + 'action_variables') and self.action_variables is not None: + _dict['action_variables'] = self.action_variables + if hasattr(self, + 'skill_variables') and self.skill_variables is not None: + _dict['skill_variables'] = self.skill_variables + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageContextActionSkill object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageContextActionSkill') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageContextActionSkill') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageContextDialogSkill: + """ + Context variables that are used by the dialog skill. + + :param dict user_defined: (optional) An object containing any arbitrary + variables that can be read and written by a particular skill. + :param MessageContextSkillSystem system: (optional) System context data used by + the skill. + """ + + def __init__( + self, + *, + user_defined: Optional[dict] = None, + system: Optional['MessageContextSkillSystem'] = None, + ) -> None: + """ + Initialize a MessageContextDialogSkill object. + + :param dict user_defined: (optional) An object containing any arbitrary + variables that can be read and written by a particular skill. + :param MessageContextSkillSystem system: (optional) System context data + used by the skill. 
+ """ + self.user_defined = user_defined + self.system = system + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageContextDialogSkill': + """Initialize a MessageContextDialogSkill object from a json dictionary.""" + args = {} + if (user_defined := _dict.get('user_defined')) is not None: + args['user_defined'] = user_defined + if (system := _dict.get('system')) is not None: + args['system'] = MessageContextSkillSystem.from_dict(system) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageContextDialogSkill object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'user_defined') and self.user_defined is not None: + _dict['user_defined'] = self.user_defined + if hasattr(self, 'system') and self.system is not None: + if isinstance(self.system, dict): + _dict['system'] = self.system + else: + _dict['system'] = self.system.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageContextDialogSkill object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageContextDialogSkill') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageContextDialogSkill') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageContextGlobal: + """ + Session context data that is shared by all skills used by the assistant. + + :param MessageContextGlobalSystem system: (optional) Built-in system properties + that apply to all skills used by the assistant. + :param str session_id: (optional) The session ID. + """ + + def __init__( + self, + *, + system: Optional['MessageContextGlobalSystem'] = None, + session_id: Optional[str] = None, + ) -> None: + """ + Initialize a MessageContextGlobal object. + + :param MessageContextGlobalSystem system: (optional) Built-in system + properties that apply to all skills used by the assistant. 
+ """ + self.system = system + self.session_id = session_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageContextGlobal': + """Initialize a MessageContextGlobal object from a json dictionary.""" + args = {} + if (system := _dict.get('system')) is not None: + args['system'] = MessageContextGlobalSystem.from_dict(system) + if (session_id := _dict.get('session_id')) is not None: + args['session_id'] = session_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageContextGlobal object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'system') and self.system is not None: + if isinstance(self.system, dict): + _dict['system'] = self.system + else: + _dict['system'] = self.system.to_dict() + if hasattr(self, 'session_id') and getattr(self, + 'session_id') is not None: + _dict['session_id'] = getattr(self, 'session_id') + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageContextGlobal object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageContextGlobal') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageContextGlobal') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageContextGlobalSystem: + """ + Built-in system properties that apply to all skills used by the assistant. + + :param str timezone: (optional) The user time zone. The assistant uses the time + zone to correctly resolve relative time references. + :param str user_id: (optional) A string value that identifies the user who is + interacting with the assistant. The client must provide a unique identifier for + each individual end user who accesses the application. For user-based plans, + this user ID is used to identify unique users for billing purposes. This string + cannot contain carriage return, newline, or tab characters. If no value is + specified in the input, **user_id** is automatically set to the value of + **context.global.session_id**. + **Note:** This property is the same as the **user_id** property at the root of + the message body. If **user_id** is specified in both locations in a message + request, the value specified at the root is used. + :param int turn_count: (optional) A counter that is automatically incremented + with each turn of the conversation. A value of 1 indicates that this is the the + first turn of a new conversation, which can affect the behavior of some skills + (for example, triggering the start node of a dialog). + :param str locale: (optional) The language code for localization in the user + input. The specified locale overrides the default for the assistant, and is used + for interpreting entity values in user input such as date values. For example, + `04/03/2018` might be interpreted either as April 3 or March 4, depending on the + locale. + This property is included only if the new system entities are enabled for the + skill. + :param str reference_time: (optional) The base time for interpreting any + relative time mentions in the user input. 
The specified time overrides the + current server time, and is used to calculate times mentioned in relative terms + such as `now` or `tomorrow`. This can be useful for simulating past or future + times for testing purposes, or when analyzing documents such as news articles. + This value must be a UTC time value formatted according to ISO 8601 (for + example, `2021-06-26T12:00:00Z` for noon UTC on 26 June 2021). + This property is included only if the new system entities are enabled for the + skill. + :param str session_start_time: (optional) The time at which the session started. + With the stateful `message` method, the start time is always present, and is set + by the service based on the time the session was created. With the stateless + `message` method, the start time is set by the service in the response to the + first message, and should be returned as part of the context with each + subsequent message in the session. + This value is a UTC time value formatted according to ISO 8601 (for example, + `2021-06-26T12:00:00Z` for noon UTC on 26 June 2021). + :param str state: (optional) An encoded string that represents the configuration + state of the assistant at the beginning of the conversation. If you are using + the stateless `message` method, save this value and then send it in the context + of the subsequent message request to avoid disruptions if there are + configuration changes during the conversation (such as a change to a skill the + assistant uses). + :param bool skip_user_input: (optional) For internal use only. + """ + + def __init__( + self, + *, + timezone: Optional[str] = None, + user_id: Optional[str] = None, + turn_count: Optional[int] = None, + locale: Optional[str] = None, + reference_time: Optional[str] = None, + session_start_time: Optional[str] = None, + state: Optional[str] = None, + skip_user_input: Optional[bool] = None, + ) -> None: + """ + Initialize a MessageContextGlobalSystem object. + + :param str timezone: (optional) The user time zone. The assistant uses the + time zone to correctly resolve relative time references. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the assistant. The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. If no value is specified in the input, **user_id** is + automatically set to the value of **context.global.session_id**. + **Note:** This property is the same as the **user_id** property at the root + of the message body. If **user_id** is specified in both locations in a + message request, the value specified at the root is used. + :param int turn_count: (optional) A counter that is automatically + incremented with each turn of the conversation. A value of 1 indicates that + this is the the first turn of a new conversation, which can affect the + behavior of some skills (for example, triggering the start node of a + dialog). + :param str locale: (optional) The language code for localization in the + user input. The specified locale overrides the default for the assistant, + and is used for interpreting entity values in user input such as date + values. For example, `04/03/2018` might be interpreted either as April 3 or + March 4, depending on the locale. + This property is included only if the new system entities are enabled for + the skill. 
+ :param str reference_time: (optional) The base time for interpreting any + relative time mentions in the user input. The specified time overrides the + current server time, and is used to calculate times mentioned in relative + terms such as `now` or `tomorrow`. This can be useful for simulating past + or future times for testing purposes, or when analyzing documents such as + news articles. + This value must be a UTC time value formatted according to ISO 8601 (for + example, `2021-06-26T12:00:00Z` for noon UTC on 26 June 2021). + This property is included only if the new system entities are enabled for + the skill. + :param str session_start_time: (optional) The time at which the session + started. With the stateful `message` method, the start time is always + present, and is set by the service based on the time the session was + created. With the stateless `message` method, the start time is set by the + service in the response to the first message, and should be returned as + part of the context with each subsequent message in the session. + This value is a UTC time value formatted according to ISO 8601 (for + example, `2021-06-26T12:00:00Z` for noon UTC on 26 June 2021). + :param str state: (optional) An encoded string that represents the + configuration state of the assistant at the beginning of the conversation. + If you are using the stateless `message` method, save this value and then + send it in the context of the subsequent message request to avoid + disruptions if there are configuration changes during the conversation + (such as a change to a skill the assistant uses). + :param bool skip_user_input: (optional) For internal use only. + """ + self.timezone = timezone + self.user_id = user_id + self.turn_count = turn_count + self.locale = locale + self.reference_time = reference_time + self.session_start_time = session_start_time + self.state = state + self.skip_user_input = skip_user_input + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageContextGlobalSystem': + """Initialize a MessageContextGlobalSystem object from a json dictionary.""" + args = {} + if (timezone := _dict.get('timezone')) is not None: + args['timezone'] = timezone + if (user_id := _dict.get('user_id')) is not None: + args['user_id'] = user_id + if (turn_count := _dict.get('turn_count')) is not None: + args['turn_count'] = turn_count + if (locale := _dict.get('locale')) is not None: + args['locale'] = locale + if (reference_time := _dict.get('reference_time')) is not None: + args['reference_time'] = reference_time + if (session_start_time := _dict.get('session_start_time')) is not None: + args['session_start_time'] = session_start_time + if (state := _dict.get('state')) is not None: + args['state'] = state + if (skip_user_input := _dict.get('skip_user_input')) is not None: + args['skip_user_input'] = skip_user_input + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageContextGlobalSystem object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'timezone') and self.timezone is not None: + _dict['timezone'] = self.timezone + if hasattr(self, 'user_id') and self.user_id is not None: + _dict['user_id'] = self.user_id + if hasattr(self, 'turn_count') and self.turn_count is not None: + _dict['turn_count'] = self.turn_count + if hasattr(self, 'locale') and self.locale is not None: + _dict['locale'] = self.locale + if hasattr(self, 
'reference_time') and self.reference_time is not None: + _dict['reference_time'] = self.reference_time + if hasattr( + self, + 'session_start_time') and self.session_start_time is not None: + _dict['session_start_time'] = self.session_start_time + if hasattr(self, 'state') and self.state is not None: + _dict['state'] = self.state + if hasattr(self, + 'skip_user_input') and self.skip_user_input is not None: + _dict['skip_user_input'] = self.skip_user_input + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageContextGlobalSystem object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageContextGlobalSystem') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageContextGlobalSystem') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class LocaleEnum(str, Enum): + """ + The language code for localization in the user input. The specified locale + overrides the default for the assistant, and is used for interpreting entity + values in user input such as date values. For example, `04/03/2018` might be + interpreted either as April 3 or March 4, depending on the locale. + This property is included only if the new system entities are enabled for the + skill. + """ + + EN_US = 'en-us' + EN_CA = 'en-ca' + EN_GB = 'en-gb' + AR_AR = 'ar-ar' + CS_CZ = 'cs-cz' + DE_DE = 'de-de' + ES_ES = 'es-es' + FR_FR = 'fr-fr' + IT_IT = 'it-it' + JA_JP = 'ja-jp' + KO_KR = 'ko-kr' + NL_NL = 'nl-nl' + PT_BR = 'pt-br' + ZH_CN = 'zh-cn' + ZH_TW = 'zh-tw' + + +class MessageContextSkillSystem: + """ + System context data used by the skill. + + :param str state: (optional) An encoded string that represents the current + conversation state. By saving this value and then sending it in the context of a + subsequent message request, you can return to an earlier point in the + conversation. If you are using stateful sessions, you can also use a stored + state value to restore a paused conversation whose session is expired. + + This type supports additional properties of type object. For internal use only. + """ + + # The set of defined properties for the class + _properties = frozenset(['state']) + + def __init__( + self, + *, + state: Optional[str] = None, + **kwargs: Optional[object], + ) -> None: + """ + Initialize a MessageContextSkillSystem object. + + :param str state: (optional) An encoded string that represents the current + conversation state. By saving this value and then sending it in the context + of a subsequent message request, you can return to an earlier point in the + conversation. If you are using stateful sessions, you can also use a stored + state value to restore a paused conversation whose session is expired. + :param object **kwargs: (optional) For internal use only. + """ + self.state = state + for k, v in kwargs.items(): + if k not in MessageContextSkillSystem._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. 
+ format(k)) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageContextSkillSystem': + """Initialize a MessageContextSkillSystem object from a json dictionary.""" + args = {} + if (state := _dict.get('state')) is not None: + args['state'] = state + for k, v in _dict.items(): + if k not in cls._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + args[k] = v + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageContextSkillSystem object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'state') and self.state is not None: + _dict['state'] = self.state + for k in [ + _k for _k in vars(self).keys() + if _k not in MessageContextSkillSystem._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def get_properties(self) -> Dict: + """Return the additional properties from this instance of MessageContextSkillSystem in the form of a dict.""" + _dict = {} + for k in [ + _k for _k in vars(self).keys() + if _k not in MessageContextSkillSystem._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of additional properties in this instance of MessageContextSkillSystem""" + for k in [ + _k for _k in vars(self).keys() + if _k not in MessageContextSkillSystem._properties + ]: + delattr(self, k) + for k, v in _dict.items(): + if k not in MessageContextSkillSystem._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. + format(k)) + + def __str__(self) -> str: + """Return a `str` version of this MessageContextSkillSystem object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageContextSkillSystem') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageContextSkillSystem') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageContextSkills: + """ + Context data specific to particular skills used by the assistant. + + :param MessageContextDialogSkill main_skill: (optional) Context variables that + are used by the dialog skill. + :param MessageContextActionSkill actions_skill: (optional) Context variables + that are used by the action skill. Private variables are persisted, but not + shown. + """ + + def __init__( + self, + *, + main_skill: Optional['MessageContextDialogSkill'] = None, + actions_skill: Optional['MessageContextActionSkill'] = None, + ) -> None: + """ + Initialize a MessageContextSkills object. + + :param MessageContextDialogSkill main_skill: (optional) Context variables + that are used by the dialog skill. + :param MessageContextActionSkill actions_skill: (optional) Context + variables that are used by the action skill. Private variables are + persisted, but not shown. 
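+
+        Example (an illustrative sketch; the skill variable is a placeholder.
+        Note that the serialized JSON keys 'main skill' and 'actions skill'
+        contain a space, while the Python attribute names use underscores):
+
+            >>> skills = MessageContextSkills(
+            ...     actions_skill=MessageContextActionSkill(
+            ...         skill_variables={'customer_name': 'Watson'}))
+            >>> skills.to_dict()
+            {'actions skill': {'skill_variables': {'customer_name': 'Watson'}}}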
+ """ + self.main_skill = main_skill + self.actions_skill = actions_skill + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageContextSkills': + """Initialize a MessageContextSkills object from a json dictionary.""" + args = {} + if (main_skill := _dict.get('main skill')) is not None: + args['main_skill'] = MessageContextDialogSkill.from_dict(main_skill) + if (actions_skill := _dict.get('actions skill')) is not None: + args['actions_skill'] = MessageContextActionSkill.from_dict( + actions_skill) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageContextSkills object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'main_skill') and self.main_skill is not None: + if isinstance(self.main_skill, dict): + _dict['main skill'] = self.main_skill + else: + _dict['main skill'] = self.main_skill.to_dict() + if hasattr(self, 'actions_skill') and self.actions_skill is not None: + if isinstance(self.actions_skill, dict): + _dict['actions skill'] = self.actions_skill + else: + _dict['actions skill'] = self.actions_skill.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageContextSkills object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageContextSkills') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageContextSkills') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageInput: + """ + An input object that includes the input text. + + :param str message_type: (optional) The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or action skill is + bypassed.) + **Note:** A `search` message results in an error if no search skill is + configured for the assistant. + :param str text: (optional) The text of the user input. This string cannot + contain carriage return, newline, or tab characters. + :param List[RuntimeIntent] intents: (optional) Intents to use when evaluating + the user input. Include intents from the previous response to continue using + those intents rather than trying to recognize intents in the new input. + :param List[RuntimeEntity] entities: (optional) Entities to use when evaluating + the message. Include entities from the previous response to continue using those + entities rather than detecting entities in the new input. + :param str suggestion_id: (optional) For internal use only. + :param List[MessageInputAttachment] attachments: (optional) An array of + multimedia attachments to be sent with the message. Attachments are not + processed by the assistant itself, but can be sent to external services by + webhooks. + **Note:** Attachments are not supported on IBM Cloud Pak for Data. + :param RequestAnalytics analytics: (optional) An optional object containing + analytics data. Currently, this data is used only for events sent to the Segment + extension. + :param MessageInputOptions options: (optional) Optional properties that control + how the assistant responds. 
+ """ + + def __init__( + self, + *, + message_type: Optional[str] = None, + text: Optional[str] = None, + intents: Optional[List['RuntimeIntent']] = None, + entities: Optional[List['RuntimeEntity']] = None, + suggestion_id: Optional[str] = None, + attachments: Optional[List['MessageInputAttachment']] = None, + analytics: Optional['RequestAnalytics'] = None, + options: Optional['MessageInputOptions'] = None, + ) -> None: + """ + Initialize a MessageInput object. + + :param str message_type: (optional) The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or action skill + is bypassed.) + **Note:** A `search` message results in an error if no search skill is + configured for the assistant. + :param str text: (optional) The text of the user input. This string cannot + contain carriage return, newline, or tab characters. + :param List[RuntimeIntent] intents: (optional) Intents to use when + evaluating the user input. Include intents from the previous response to + continue using those intents rather than trying to recognize intents in the + new input. + :param List[RuntimeEntity] entities: (optional) Entities to use when + evaluating the message. Include entities from the previous response to + continue using those entities rather than detecting entities in the new + input. + :param str suggestion_id: (optional) For internal use only. + :param List[MessageInputAttachment] attachments: (optional) An array of + multimedia attachments to be sent with the message. Attachments are not + processed by the assistant itself, but can be sent to external services by + webhooks. + **Note:** Attachments are not supported on IBM Cloud Pak for Data. + :param RequestAnalytics analytics: (optional) An optional object containing + analytics data. Currently, this data is used only for events sent to the + Segment extension. + :param MessageInputOptions options: (optional) Optional properties that + control how the assistant responds. 
+ """ + self.message_type = message_type + self.text = text + self.intents = intents + self.entities = entities + self.suggestion_id = suggestion_id + self.attachments = attachments + self.analytics = analytics + self.options = options + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageInput': + """Initialize a MessageInput object from a json dictionary.""" + args = {} + if (message_type := _dict.get('message_type')) is not None: + args['message_type'] = message_type + if (text := _dict.get('text')) is not None: + args['text'] = text + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + if (suggestion_id := _dict.get('suggestion_id')) is not None: + args['suggestion_id'] = suggestion_id + if (attachments := _dict.get('attachments')) is not None: + args['attachments'] = [ + MessageInputAttachment.from_dict(v) for v in attachments + ] + if (analytics := _dict.get('analytics')) is not None: + args['analytics'] = RequestAnalytics.from_dict(analytics) + if (options := _dict.get('options')) is not None: + args['options'] = MessageInputOptions.from_dict(options) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageInput object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'message_type') and self.message_type is not None: + _dict['message_type'] = self.message_type + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'suggestion_id') and self.suggestion_id is not None: + _dict['suggestion_id'] = self.suggestion_id + if hasattr(self, 'attachments') and self.attachments is not None: + attachments_list = [] + for v in self.attachments: + if isinstance(v, dict): + attachments_list.append(v) + else: + attachments_list.append(v.to_dict()) + _dict['attachments'] = attachments_list + if hasattr(self, 'analytics') and self.analytics is not None: + if isinstance(self.analytics, dict): + _dict['analytics'] = self.analytics + else: + _dict['analytics'] = self.analytics.to_dict() + if hasattr(self, 'options') and self.options is not None: + if isinstance(self.options, dict): + _dict['options'] = self.options + else: + _dict['options'] = self.options.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageInput object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageInput') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageInput') -> bool: + """Return `true` when self and other are not 
equal, false otherwise.""" + return not self == other + + class MessageTypeEnum(str, Enum): + """ + The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or action skill is + bypassed.) + **Note:** A `search` message results in an error if no search skill is configured + for the assistant. + """ + + TEXT = 'text' + SEARCH = 'search' + + +class MessageInputAttachment: + """ + A reference to a media file to be sent as an attachment with the message. + + :param str url: The URL of the media file. + :param str media_type: (optional) The media content type (such as a MIME type) + of the attachment. + """ + + def __init__( + self, + url: str, + *, + media_type: Optional[str] = None, + ) -> None: + """ + Initialize a MessageInputAttachment object. + + :param str url: The URL of the media file. + :param str media_type: (optional) The media content type (such as a MIME + type) of the attachment. + """ + self.url = url + self.media_type = media_type + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageInputAttachment': + """Initialize a MessageInputAttachment object from a json dictionary.""" + args = {} + if (url := _dict.get('url')) is not None: + args['url'] = url + else: + raise ValueError( + 'Required property \'url\' not present in MessageInputAttachment JSON' + ) + if (media_type := _dict.get('media_type')) is not None: + args['media_type'] = media_type + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageInputAttachment object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + if hasattr(self, 'media_type') and self.media_type is not None: + _dict['media_type'] = self.media_type + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageInputAttachment object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageInputAttachment') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageInputAttachment') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageInputOptions: + """ + Optional properties that control how the assistant responds. + + :param bool restart: (optional) Whether to restart dialog processing at the root + of the dialog, regardless of any previously visited nodes. **Note:** This does + not affect `turn_count` or any other context variables. + :param bool alternate_intents: (optional) Whether to return more than one + intent. Set to `true` to return all matching intents. + :param bool async_callout: (optional) Whether custom extension callouts are + executed asynchronously. Asynchronous execution means the response to the + extension callout will be processed on the subsequent message call, the initial + message response signals to the client that the operation may be long running. + With synchronous execution the custom extension is executed and returns the + response in a single message turn. 
**Note:** **async_callout** defaults to true + for API versions earlier than 2023-06-15. + :param MessageInputOptionsSpelling spelling: (optional) Spelling correction + options for the message. Any options specified on an individual message override + the settings configured for the skill. + :param bool debug: (optional) Whether to return additional diagnostic + information. Set to `true` to return additional information in the + `output.debug` property. If you also specify **return_context**=`true`, the + returned skill context includes the `system.state` property. + :param bool return_context: (optional) Whether to return session context with + the response. If you specify `true`, the response includes the `context` + property. If you also specify **debug**=`true`, the returned skill context + includes the `system.state` property. + :param bool export: (optional) Whether to return session context, including full + conversation state. If you specify `true`, the response includes the `context` + property, and the skill context includes the `system.state` property. + **Note:** If **export**=`true`, the context is returned regardless of the value + of **return_context**. + """ + + def __init__( + self, + *, + restart: Optional[bool] = None, + alternate_intents: Optional[bool] = None, + async_callout: Optional[bool] = None, + spelling: Optional['MessageInputOptionsSpelling'] = None, + debug: Optional[bool] = None, + return_context: Optional[bool] = None, + export: Optional[bool] = None, + ) -> None: + """ + Initialize a MessageInputOptions object. + + :param bool restart: (optional) Whether to restart dialog processing at the + root of the dialog, regardless of any previously visited nodes. **Note:** + This does not affect `turn_count` or any other context variables. + :param bool alternate_intents: (optional) Whether to return more than one + intent. Set to `true` to return all matching intents. + :param bool async_callout: (optional) Whether custom extension callouts are + executed asynchronously. Asynchronous execution means the response to the + extension callout will be processed on the subsequent message call, the + initial message response signals to the client that the operation may be + long running. With synchronous execution the custom extension is executed + and returns the response in a single message turn. **Note:** + **async_callout** defaults to true for API versions earlier than + 2023-06-15. + :param MessageInputOptionsSpelling spelling: (optional) Spelling correction + options for the message. Any options specified on an individual message + override the settings configured for the skill. + :param bool debug: (optional) Whether to return additional diagnostic + information. Set to `true` to return additional information in the + `output.debug` property. If you also specify **return_context**=`true`, the + returned skill context includes the `system.state` property. + :param bool return_context: (optional) Whether to return session context + with the response. If you specify `true`, the response includes the + `context` property. If you also specify **debug**=`true`, the returned + skill context includes the `system.state` property. + :param bool export: (optional) Whether to return session context, including + full conversation state. If you specify `true`, the response includes the + `context` property, and the skill context includes the `system.state` + property. + **Note:** If **export**=`true`, the context is returned regardless of the + value of **return_context**. 
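+
+        A minimal illustrative sketch (the option values below are arbitrary
+        choices, not service defaults)::
+
+            options = MessageInputOptions(
+                alternate_intents=True,
+                return_context=True,
+                spelling=MessageInputOptionsSpelling(suggestions=True,
+                                                     auto_correct=True),
+            )
+            payload = options.to_dict()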
+ """ + self.restart = restart + self.alternate_intents = alternate_intents + self.async_callout = async_callout + self.spelling = spelling + self.debug = debug + self.return_context = return_context + self.export = export + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageInputOptions': + """Initialize a MessageInputOptions object from a json dictionary.""" + args = {} + if (restart := _dict.get('restart')) is not None: + args['restart'] = restart + if (alternate_intents := _dict.get('alternate_intents')) is not None: + args['alternate_intents'] = alternate_intents + if (async_callout := _dict.get('async_callout')) is not None: + args['async_callout'] = async_callout + if (spelling := _dict.get('spelling')) is not None: + args['spelling'] = MessageInputOptionsSpelling.from_dict(spelling) + if (debug := _dict.get('debug')) is not None: + args['debug'] = debug + if (return_context := _dict.get('return_context')) is not None: + args['return_context'] = return_context + if (export := _dict.get('export')) is not None: + args['export'] = export + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageInputOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'restart') and self.restart is not None: + _dict['restart'] = self.restart + if hasattr(self, + 'alternate_intents') and self.alternate_intents is not None: + _dict['alternate_intents'] = self.alternate_intents + if hasattr(self, 'async_callout') and self.async_callout is not None: + _dict['async_callout'] = self.async_callout + if hasattr(self, 'spelling') and self.spelling is not None: + if isinstance(self.spelling, dict): + _dict['spelling'] = self.spelling + else: + _dict['spelling'] = self.spelling.to_dict() + if hasattr(self, 'debug') and self.debug is not None: + _dict['debug'] = self.debug + if hasattr(self, 'return_context') and self.return_context is not None: + _dict['return_context'] = self.return_context + if hasattr(self, 'export') and self.export is not None: + _dict['export'] = self.export + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageInputOptions object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageInputOptions') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageInputOptions') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageInputOptionsSpelling: + """ + Spelling correction options for the message. Any options specified on an individual + message override the settings configured for the skill. + + :param bool suggestions: (optional) Whether to use spelling correction when + processing the input. If spelling correction is used and **auto_correct** is + `true`, any spelling corrections are automatically applied to the user input. If + **auto_correct** is `false`, any suggested corrections are returned in the + **output.spelling** property. + This property overrides the value of the **spelling_suggestions** property in + the workspace settings for the skill. 
+ :param bool auto_correct: (optional) Whether to use autocorrection when + processing the input. If this property is `true`, any corrections are + automatically applied to the user input, and the original text is returned in + the **output.spelling** property of the message response. This property + overrides the value of the **spelling_auto_correct** property in the workspace + settings for the skill. + """ + + def __init__( + self, + *, + suggestions: Optional[bool] = None, + auto_correct: Optional[bool] = None, + ) -> None: + """ + Initialize a MessageInputOptionsSpelling object. + + :param bool suggestions: (optional) Whether to use spelling correction when + processing the input. If spelling correction is used and **auto_correct** + is `true`, any spelling corrections are automatically applied to the user + input. If **auto_correct** is `false`, any suggested corrections are + returned in the **output.spelling** property. + This property overrides the value of the **spelling_suggestions** property + in the workspace settings for the skill. + :param bool auto_correct: (optional) Whether to use autocorrection when + processing the input. If this property is `true`, any corrections are + automatically applied to the user input, and the original text is returned + in the **output.spelling** property of the message response. This property + overrides the value of the **spelling_auto_correct** property in the + workspace settings for the skill. + """ + self.suggestions = suggestions + self.auto_correct = auto_correct + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageInputOptionsSpelling': + """Initialize a MessageInputOptionsSpelling object from a json dictionary.""" + args = {} + if (suggestions := _dict.get('suggestions')) is not None: + args['suggestions'] = suggestions + if (auto_correct := _dict.get('auto_correct')) is not None: + args['auto_correct'] = auto_correct + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageInputOptionsSpelling object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'suggestions') and self.suggestions is not None: + _dict['suggestions'] = self.suggestions + if hasattr(self, 'auto_correct') and self.auto_correct is not None: + _dict['auto_correct'] = self.auto_correct + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageInputOptionsSpelling object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageInputOptionsSpelling') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageInputOptionsSpelling') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageOutput: + """ + Assistant output to be rendered or processed by the client. + + :param List[RuntimeResponseGeneric] generic: (optional) Output intended for any + channel. It is the responsibility of the client application to implement the + supported response types. + :param List[RuntimeIntent] intents: (optional) An array of intents recognized in + the user input, sorted in descending order of confidence. 
+    :param List[RuntimeEntity] entities: (optional) An array of entities identified
+    in the user input.
+    :param List[DialogNodeAction] actions: (optional) An array of objects describing
+    any actions requested by the dialog node.
+    :param MessageOutputDebug debug: (optional) Additional detailed information
+    about a message response and how it was generated.
+    :param dict user_defined: (optional) An object containing any custom properties
+    included in the response. This object includes any arbitrary properties defined
+    in the dialog JSON editor as part of the dialog node output.
+    :param MessageOutputSpelling spelling: (optional) Properties describing any
+    spelling corrections in the user input that was received.
+    :param List[MessageOutputLLMMetadata] llm_metadata: (optional) An array of
+    objects that provide information about calls to large language models that
+    occurred as part of handling this message.
+    """
+
+    def __init__(
+        self,
+        *,
+        generic: Optional[List['RuntimeResponseGeneric']] = None,
+        intents: Optional[List['RuntimeIntent']] = None,
+        entities: Optional[List['RuntimeEntity']] = None,
+        actions: Optional[List['DialogNodeAction']] = None,
+        debug: Optional['MessageOutputDebug'] = None,
+        user_defined: Optional[dict] = None,
+        spelling: Optional['MessageOutputSpelling'] = None,
+        llm_metadata: Optional[List['MessageOutputLLMMetadata']] = None,
+    ) -> None:
+        """
+        Initialize a MessageOutput object.
+
+        :param List[RuntimeResponseGeneric] generic: (optional) Output intended for
+        any channel. It is the responsibility of the client application to
+        implement the supported response types.
+        :param List[RuntimeIntent] intents: (optional) An array of intents
+        recognized in the user input, sorted in descending order of confidence.
+        :param List[RuntimeEntity] entities: (optional) An array of entities
+        identified in the user input.
+        :param List[DialogNodeAction] actions: (optional) An array of objects
+        describing any actions requested by the dialog node.
+        :param MessageOutputDebug debug: (optional) Additional detailed information
+        about a message response and how it was generated.
+        :param dict user_defined: (optional) An object containing any custom
+        properties included in the response. This object includes any arbitrary
+        properties defined in the dialog JSON editor as part of the dialog node
+        output.
+        :param MessageOutputSpelling spelling: (optional) Properties describing any
+        spelling corrections in the user input that was received.
+        :param List[MessageOutputLLMMetadata] llm_metadata: (optional) An array of
+        objects that provide information about calls to large language models that
+        occurred as part of handling this message.
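+
+        A minimal sketch of reading assistant output; `message_json` stands in
+        for the `output` object of a hypothetical message response::
+
+            output = MessageOutput.from_dict(message_json)
+            for item in output.generic or []:
+                print(item.to_dict())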
+ """ + self.generic = generic + self.intents = intents + self.entities = entities + self.actions = actions + self.debug = debug + self.user_defined = user_defined + self.spelling = spelling + self.llm_metadata = llm_metadata + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageOutput': + """Initialize a MessageOutput object from a json dictionary.""" + args = {} + if (generic := _dict.get('generic')) is not None: + args['generic'] = [ + RuntimeResponseGeneric.from_dict(v) for v in generic + ] + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + if (actions := _dict.get('actions')) is not None: + args['actions'] = [DialogNodeAction.from_dict(v) for v in actions] + if (debug := _dict.get('debug')) is not None: + args['debug'] = MessageOutputDebug.from_dict(debug) + if (user_defined := _dict.get('user_defined')) is not None: + args['user_defined'] = user_defined + if (spelling := _dict.get('spelling')) is not None: + args['spelling'] = MessageOutputSpelling.from_dict(spelling) + if (llm_metadata := _dict.get('llm_metadata')) is not None: + args['llm_metadata'] = [ + MessageOutputLLMMetadata.from_dict(v) for v in llm_metadata + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutput object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'generic') and self.generic is not None: + generic_list = [] + for v in self.generic: + if isinstance(v, dict): + generic_list.append(v) + else: + generic_list.append(v.to_dict()) + _dict['generic'] = generic_list + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'actions') and self.actions is not None: + actions_list = [] + for v in self.actions: + if isinstance(v, dict): + actions_list.append(v) + else: + actions_list.append(v.to_dict()) + _dict['actions'] = actions_list + if hasattr(self, 'debug') and self.debug is not None: + if isinstance(self.debug, dict): + _dict['debug'] = self.debug + else: + _dict['debug'] = self.debug.to_dict() + if hasattr(self, 'user_defined') and self.user_defined is not None: + _dict['user_defined'] = self.user_defined + if hasattr(self, 'spelling') and self.spelling is not None: + if isinstance(self.spelling, dict): + _dict['spelling'] = self.spelling + else: + _dict['spelling'] = self.spelling.to_dict() + if hasattr(self, 'llm_metadata') and self.llm_metadata is not None: + llm_metadata_list = [] + for v in self.llm_metadata: + if isinstance(v, dict): + llm_metadata_list.append(v) + else: + llm_metadata_list.append(v.to_dict()) + _dict['llm_metadata'] = llm_metadata_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutput object.""" + return json.dumps(self.to_dict(), indent=2) + 
+ def __eq__(self, other: 'MessageOutput') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageOutput') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageOutputDebug: + """ + Additional detailed information about a message response and how it was generated. + + :param List[DialogNodeVisited] nodes_visited: (optional) An array of objects + containing detailed diagnostic information about dialog nodes that were visited + during processing of the input message. + :param List[DialogLogMessage] log_messages: (optional) An array of up to 50 + messages logged with the request. + :param bool branch_exited: (optional) Assistant sets this to true when this + message response concludes or interrupts a dialog. + :param str branch_exited_reason: (optional) When `branch_exited` is set to + `true` by the assistant, the `branch_exited_reason` specifies whether the dialog + completed by itself or got interrupted. + :param List[MessageOutputDebugTurnEvent] turn_events: (optional) An array of + objects containing detailed diagnostic information about dialog nodes and + actions that were visited during processing of the input message. + This property is present only if the assistant has an action skill. + """ + + def __init__( + self, + *, + nodes_visited: Optional[List['DialogNodeVisited']] = None, + log_messages: Optional[List['DialogLogMessage']] = None, + branch_exited: Optional[bool] = None, + branch_exited_reason: Optional[str] = None, + turn_events: Optional[List['MessageOutputDebugTurnEvent']] = None, + ) -> None: + """ + Initialize a MessageOutputDebug object. + + :param List[DialogNodeVisited] nodes_visited: (optional) An array of + objects containing detailed diagnostic information about dialog nodes that + were visited during processing of the input message. + :param List[DialogLogMessage] log_messages: (optional) An array of up to 50 + messages logged with the request. + :param bool branch_exited: (optional) Assistant sets this to true when this + message response concludes or interrupts a dialog. + :param str branch_exited_reason: (optional) When `branch_exited` is set to + `true` by the assistant, the `branch_exited_reason` specifies whether the + dialog completed by itself or got interrupted. + :param List[MessageOutputDebugTurnEvent] turn_events: (optional) An array + of objects containing detailed diagnostic information about dialog nodes + and actions that were visited during processing of the input message. + This property is present only if the assistant has an action skill. 
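+
+        An illustrative sketch using hand-written values (real values come from
+        the `output.debug` property of a message response)::
+
+            debug = MessageOutputDebug.from_dict({
+                'branch_exited': True,
+                'branch_exited_reason': 'completed',
+            })
+            if debug.branch_exited:
+                print(debug.branch_exited_reason)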
+ """ + self.nodes_visited = nodes_visited + self.log_messages = log_messages + self.branch_exited = branch_exited + self.branch_exited_reason = branch_exited_reason + self.turn_events = turn_events + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageOutputDebug': + """Initialize a MessageOutputDebug object from a json dictionary.""" + args = {} + if (nodes_visited := _dict.get('nodes_visited')) is not None: + args['nodes_visited'] = [ + DialogNodeVisited.from_dict(v) for v in nodes_visited + ] + if (log_messages := _dict.get('log_messages')) is not None: + args['log_messages'] = [ + DialogLogMessage.from_dict(v) for v in log_messages + ] + if (branch_exited := _dict.get('branch_exited')) is not None: + args['branch_exited'] = branch_exited + if (branch_exited_reason := + _dict.get('branch_exited_reason')) is not None: + args['branch_exited_reason'] = branch_exited_reason + if (turn_events := _dict.get('turn_events')) is not None: + args['turn_events'] = [ + MessageOutputDebugTurnEvent.from_dict(v) for v in turn_events + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebug object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'nodes_visited') and self.nodes_visited is not None: + nodes_visited_list = [] + for v in self.nodes_visited: + if isinstance(v, dict): + nodes_visited_list.append(v) + else: + nodes_visited_list.append(v.to_dict()) + _dict['nodes_visited'] = nodes_visited_list + if hasattr(self, 'log_messages') and self.log_messages is not None: + log_messages_list = [] + for v in self.log_messages: + if isinstance(v, dict): + log_messages_list.append(v) + else: + log_messages_list.append(v.to_dict()) + _dict['log_messages'] = log_messages_list + if hasattr(self, 'branch_exited') and self.branch_exited is not None: + _dict['branch_exited'] = self.branch_exited + if hasattr(self, 'branch_exited_reason' + ) and self.branch_exited_reason is not None: + _dict['branch_exited_reason'] = self.branch_exited_reason + if hasattr(self, 'turn_events') and self.turn_events is not None: + turn_events_list = [] + for v in self.turn_events: + if isinstance(v, dict): + turn_events_list.append(v) + else: + turn_events_list.append(v.to_dict()) + _dict['turn_events'] = turn_events_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebug object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageOutputDebug') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageOutputDebug') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class BranchExitedReasonEnum(str, Enum): + """ + When `branch_exited` is set to `true` by the assistant, the `branch_exited_reason` + specifies whether the dialog completed by itself or got interrupted. + """ + + COMPLETED = 'completed' + FALLBACK = 'fallback' + + +class MessageOutputDebugTurnEvent: + """ + MessageOutputDebugTurnEvent. + + """ + + def __init__(self,) -> None: + """ + Initialize a MessageOutputDebugTurnEvent object. + + """ + msg = "Cannot instantiate base class. 
Instead, instantiate one of the defined subclasses: {0}".format(
+            ", ".join([
+                'MessageOutputDebugTurnEventTurnEventActionVisited',
+                'MessageOutputDebugTurnEventTurnEventActionFinished',
+                'MessageOutputDebugTurnEventTurnEventStepVisited',
+                'MessageOutputDebugTurnEventTurnEventStepAnswered',
+                'MessageOutputDebugTurnEventTurnEventHandlerVisited',
+                'MessageOutputDebugTurnEventTurnEventCallout',
+                'MessageOutputDebugTurnEventTurnEventSearch',
+                'MessageOutputDebugTurnEventTurnEventNodeVisited',
+                'MessageOutputDebugTurnEventTurnEventConversationalSearchEnd',
+                'MessageOutputDebugTurnEventTurnEventManualRoute',
+                'MessageOutputDebugTurnEventTurnEventTopicSwitchDenied',
+                'MessageOutputDebugTurnEventTurnEventActionRoutingDenied',
+                'MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied',
+                'MessageOutputDebugTurnEventTurnEventGenerativeAICalled',
+                'MessageOutputDebugTurnEventTurnEventClientActions'
+            ]))
+        raise Exception(msg)
+
+    @classmethod
+    def from_dict(cls, _dict: Dict) -> 'MessageOutputDebugTurnEvent':
+        """Initialize a MessageOutputDebugTurnEvent object from a json dictionary."""
+        disc_class = cls._get_class_by_discriminator(_dict)
+        if disc_class != cls:
+            return disc_class.from_dict(_dict)
+        msg = "Cannot convert dictionary into an instance of base class 'MessageOutputDebugTurnEvent'. The discriminator value should map to a valid subclass: {0}".format(
+            ", ".join([
+                'MessageOutputDebugTurnEventTurnEventActionVisited',
+                'MessageOutputDebugTurnEventTurnEventActionFinished',
+                'MessageOutputDebugTurnEventTurnEventStepVisited',
+                'MessageOutputDebugTurnEventTurnEventStepAnswered',
+                'MessageOutputDebugTurnEventTurnEventHandlerVisited',
+                'MessageOutputDebugTurnEventTurnEventCallout',
+                'MessageOutputDebugTurnEventTurnEventSearch',
+                'MessageOutputDebugTurnEventTurnEventNodeVisited',
+                'MessageOutputDebugTurnEventTurnEventConversationalSearchEnd',
+                'MessageOutputDebugTurnEventTurnEventManualRoute',
+                'MessageOutputDebugTurnEventTurnEventTopicSwitchDenied',
+                'MessageOutputDebugTurnEventTurnEventActionRoutingDenied',
+                'MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied',
+                'MessageOutputDebugTurnEventTurnEventGenerativeAICalled',
+                'MessageOutputDebugTurnEventTurnEventClientActions'
+            ]))
+        raise Exception(msg)
+
+    @classmethod
+    def _from_dict(cls, _dict: Dict):
+        """Initialize a MessageOutputDebugTurnEvent object from a json dictionary."""
+        return cls.from_dict(_dict)
+
+    @classmethod
+    def _get_class_by_discriminator(cls, _dict: Dict) -> object:
+        mapping = {}
+        mapping[
+            'action_visited'] = 'MessageOutputDebugTurnEventTurnEventActionVisited'
+        mapping[
+            'action_finished'] = 'MessageOutputDebugTurnEventTurnEventActionFinished'
+        mapping[
+            'step_visited'] = 'MessageOutputDebugTurnEventTurnEventStepVisited'
+        mapping[
+            'step_answered'] = 'MessageOutputDebugTurnEventTurnEventStepAnswered'
+        mapping[
+            'handler_visited'] = 'MessageOutputDebugTurnEventTurnEventHandlerVisited'
+        mapping['callout'] = 'MessageOutputDebugTurnEventTurnEventCallout'
+        mapping['search'] = 'MessageOutputDebugTurnEventTurnEventSearch'
+        mapping[
+            'node_visited'] = 'MessageOutputDebugTurnEventTurnEventNodeVisited'
+        mapping[
+            'conversational_search_end'] = 'MessageOutputDebugTurnEventTurnEventConversationalSearchEnd'
+        mapping[
+            'manual_route'] = 'MessageOutputDebugTurnEventTurnEventManualRoute'
+        mapping[
+            'topic_switch_denied'] = 'MessageOutputDebugTurnEventTurnEventTopicSwitchDenied'
+        mapping[
+            'action_routing_denied'] = 
'MessageOutputDebugTurnEventTurnEventActionRoutingDenied' + mapping[ + 'suggestion_intents_denied'] = 'MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied' + mapping[ + 'generative_ai_called'] = 'MessageOutputDebugTurnEventTurnEventGenerativeAICalled' + mapping[ + 'client_actions'] = 'MessageOutputDebugTurnEventTurnEventClientActions' + disc_value = _dict.get('event') + if disc_value is None: + raise ValueError( + 'Discriminator property \'event\' not found in MessageOutputDebugTurnEvent JSON' + ) + class_name = mapping.get(disc_value, disc_value) + try: + disc_class = getattr(sys.modules[__name__], class_name) + except AttributeError: + disc_class = cls + if isinstance(disc_class, object): + return disc_class + raise TypeError('%s is not a discriminator class' % class_name) + + +class MessageOutputLLMMetadata: + """ + MessageOutputLLMMetadata. + + :param str task: (optional) The task that used a large language model. + :param str model_id: (optional) The id for the large language model used for the + task. + """ + + def __init__( + self, + *, + task: Optional[str] = None, + model_id: Optional[str] = None, + ) -> None: + """ + Initialize a MessageOutputLLMMetadata object. + + :param str task: (optional) The task that used a large language model. + :param str model_id: (optional) The id for the large language model used + for the task. + """ + self.task = task + self.model_id = model_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageOutputLLMMetadata': + """Initialize a MessageOutputLLMMetadata object from a json dictionary.""" + args = {} + if (task := _dict.get('task')) is not None: + args['task'] = task + if (model_id := _dict.get('model_id')) is not None: + args['model_id'] = model_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputLLMMetadata object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'task') and self.task is not None: + _dict['task'] = self.task + if hasattr(self, 'model_id') and self.model_id is not None: + _dict['model_id'] = self.model_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputLLMMetadata object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageOutputLLMMetadata') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageOutputLLMMetadata') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageOutputSpelling: + """ + Properties describing any spelling corrections in the user input that was received. + + :param str text: (optional) The user input text that was used to generate the + response. If spelling autocorrection is enabled, this text reflects any spelling + corrections that were applied. + :param str original_text: (optional) The original user input text. This property + is returned only if autocorrection is enabled and the user input was corrected. + :param str suggested_text: (optional) Any suggested corrections of the input + text. This property is returned only if spelling correction is enabled and + autocorrection is disabled. 
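+
+    An illustrative sketch using hand-written values (real values come from the
+    `output.spelling` property of a message response)::
+
+        spelling = MessageOutputSpelling.from_dict({
+            'text': 'hello',
+            'original_text': 'helo',
+        })
+        if spelling.original_text:
+            print(spelling.original_text, '->', spelling.text)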
+ """ + + def __init__( + self, + *, + text: Optional[str] = None, + original_text: Optional[str] = None, + suggested_text: Optional[str] = None, + ) -> None: + """ + Initialize a MessageOutputSpelling object. + + :param str text: (optional) The user input text that was used to generate + the response. If spelling autocorrection is enabled, this text reflects any + spelling corrections that were applied. + :param str original_text: (optional) The original user input text. This + property is returned only if autocorrection is enabled and the user input + was corrected. + :param str suggested_text: (optional) Any suggested corrections of the + input text. This property is returned only if spelling correction is + enabled and autocorrection is disabled. + """ + self.text = text + self.original_text = original_text + self.suggested_text = suggested_text + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageOutputSpelling': + """Initialize a MessageOutputSpelling object from a json dictionary.""" + args = {} + if (text := _dict.get('text')) is not None: + args['text'] = text + if (original_text := _dict.get('original_text')) is not None: + args['original_text'] = original_text + if (suggested_text := _dict.get('suggested_text')) is not None: + args['suggested_text'] = suggested_text + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputSpelling object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'original_text') and self.original_text is not None: + _dict['original_text'] = self.original_text + if hasattr(self, 'suggested_text') and self.suggested_text is not None: + _dict['suggested_text'] = self.suggested_text + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputSpelling object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageOutputSpelling') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageOutputSpelling') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageStreamMetadata: + """ + Contains meta-information about the item(s) being streamed. + + :param Metadata streaming_metadata: Contains meta-information about the item(s) + being streamed. + """ + + def __init__( + self, + streaming_metadata: 'Metadata', + ) -> None: + """ + Initialize a MessageStreamMetadata object. + + :param Metadata streaming_metadata: Contains meta-information about the + item(s) being streamed. 
+ """ + self.streaming_metadata = streaming_metadata + + @classmethod + def from_dict(cls, _dict: Dict) -> 'MessageStreamMetadata': + """Initialize a MessageStreamMetadata object from a json dictionary.""" + args = {} + if (streaming_metadata := _dict.get('streaming_metadata')) is not None: + args['streaming_metadata'] = Metadata.from_dict(streaming_metadata) + else: + raise ValueError( + 'Required property \'streaming_metadata\' not present in MessageStreamMetadata JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageStreamMetadata object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr( + self, + 'streaming_metadata') and self.streaming_metadata is not None: + if isinstance(self.streaming_metadata, dict): + _dict['streaming_metadata'] = self.streaming_metadata + else: + _dict['streaming_metadata'] = self.streaming_metadata.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageStreamMetadata object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'MessageStreamMetadata') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'MessageStreamMetadata') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageStreamResponse: + """ + A streamed response from the watsonx Assistant service. + + """ + + def __init__(self,) -> None: + """ + Initialize a MessageStreamResponse object. + + """ + msg = "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format( + ", ".join([ + 'MessageStreamResponseMessageStreamPartialItem', + 'MessageStreamResponseMessageStreamCompleteItem', + 'MessageStreamResponseStatefulMessageStreamFinalResponse' + ])) + raise Exception(msg) + + +class Metadata: + """ + Contains meta-information about the item(s) being streamed. + + :param int id: (optional) Identifies the index and sequence of the current + streamed response item. + """ + + def __init__( + self, + *, + id: Optional[int] = None, + ) -> None: + """ + Initialize a Metadata object. + + :param int id: (optional) Identifies the index and sequence of the current + streamed response item. 
+ """ + self.id = id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Metadata': + """Initialize a Metadata object from a json dictionary.""" + args = {} + if (id := _dict.get('id')) is not None: + args['id'] = id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Metadata object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'id') and self.id is not None: + _dict['id'] = self.id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Metadata object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Metadata') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Metadata') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MonitorAssistantReleaseImportArtifactResponse: + """ + MonitorAssistantReleaseImportArtifactResponse. + + :param str status: (optional) The current status of the release import process: + - **Completed**: The artifact import has completed. + - **Failed**: The asynchronous artifact import process has failed. + - **Processing**: An asynchronous operation to import the artifact is underway + and not yet completed. + :param str task_id: (optional) A unique identifier for a background asynchronous + task that is executing or has executed the operation. + :param str assistant_id: (optional) The ID of the assistant to which the release + belongs. + :param List[StatusError] status_errors: (optional) An array of messages about + errors that caused an asynchronous operation to fail. Included only if + **status**=`Failed`. + :param str status_description: (optional) The description of the failed + asynchronous operation. Included only if **status**=`Failed`. + :param List[str] skill_impact_in_draft: (optional) An array of skill types in + the draft environment which will be overridden with skills from the artifact + being imported. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + """ + + def __init__( + self, + *, + status: Optional[str] = None, + task_id: Optional[str] = None, + assistant_id: Optional[str] = None, + status_errors: Optional[List['StatusError']] = None, + status_description: Optional[str] = None, + skill_impact_in_draft: Optional[List[str]] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: + """ + Initialize a MonitorAssistantReleaseImportArtifactResponse object. + + :param List[str] skill_impact_in_draft: (optional) An array of skill types + in the draft environment which will be overridden with skills from the + artifact being imported. 
+ """ + self.status = status + self.task_id = task_id + self.assistant_id = assistant_id + self.status_errors = status_errors + self.status_description = status_description + self.skill_impact_in_draft = skill_impact_in_draft + self.created = created + self.updated = updated + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'MonitorAssistantReleaseImportArtifactResponse': + """Initialize a MonitorAssistantReleaseImportArtifactResponse object from a json dictionary.""" + args = {} + if (status := _dict.get('status')) is not None: + args['status'] = status + if (task_id := _dict.get('task_id')) is not None: + args['task_id'] = task_id + if (assistant_id := _dict.get('assistant_id')) is not None: + args['assistant_id'] = assistant_id + if (status_errors := _dict.get('status_errors')) is not None: + args['status_errors'] = [ + StatusError.from_dict(v) for v in status_errors + ] + if (status_description := _dict.get('status_description')) is not None: + args['status_description'] = status_description + if (skill_impact_in_draft := + _dict.get('skill_impact_in_draft')) is not None: + args['skill_impact_in_draft'] = skill_impact_in_draft + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MonitorAssistantReleaseImportArtifactResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'status') and getattr(self, 'status') is not None: + _dict['status'] = getattr(self, 'status') + if hasattr(self, 'task_id') and getattr(self, 'task_id') is not None: + _dict['task_id'] = getattr(self, 'task_id') + if hasattr(self, 'assistant_id') and getattr( + self, 'assistant_id') is not None: + _dict['assistant_id'] = getattr(self, 'assistant_id') + if hasattr(self, 'status_errors') and getattr( + self, 'status_errors') is not None: + status_errors_list = [] + for v in getattr(self, 'status_errors'): + if isinstance(v, dict): + status_errors_list.append(v) + else: + status_errors_list.append(v.to_dict()) + _dict['status_errors'] = status_errors_list + if hasattr(self, 'status_description') and getattr( + self, 'status_description') is not None: + _dict['status_description'] = getattr(self, 'status_description') + if hasattr(self, 'skill_impact_in_draft' + ) and self.skill_impact_in_draft is not None: + _dict['skill_impact_in_draft'] = self.skill_impact_in_draft + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MonitorAssistantReleaseImportArtifactResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'MonitorAssistantReleaseImportArtifactResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 
'MonitorAssistantReleaseImportArtifactResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The current status of the release import process: + - **Completed**: The artifact import has completed. + - **Failed**: The asynchronous artifact import process has failed. + - **Processing**: An asynchronous operation to import the artifact is underway + and not yet completed. + """ + + COMPLETED = 'Completed' + FAILED = 'Failed' + PROCESSING = 'Processing' + + class SkillImpactInDraftEnum(str, Enum): + """ + The type of the skill in the draft environment. + """ + + ACTION = 'action' + DIALOG = 'dialog' + + +class Pagination: + """ + The pagination data for the returned objects. For more information about using + pagination, see [Pagination](#pagination). + + :param str refresh_url: The URL that will return the same page of results. + :param str next_url: (optional) The URL that will return the next page of + results. + :param int total: (optional) The total number of objects that satisfy the + request. This total includes all results, not just those included in the current + page. + :param int matched: (optional) Reserved for future use. + :param str refresh_cursor: (optional) A token identifying the current page of + results. + :param str next_cursor: (optional) A token identifying the next page of results. + """ + + def __init__( + self, + refresh_url: str, + *, + next_url: Optional[str] = None, + total: Optional[int] = None, + matched: Optional[int] = None, + refresh_cursor: Optional[str] = None, + next_cursor: Optional[str] = None, + ) -> None: + """ + Initialize a Pagination object. + + :param str refresh_url: The URL that will return the same page of results. + :param str next_url: (optional) The URL that will return the next page of + results. + :param int total: (optional) The total number of objects that satisfy the + request. This total includes all results, not just those included in the + current page. + :param int matched: (optional) Reserved for future use. + :param str refresh_cursor: (optional) A token identifying the current page + of results. + :param str next_cursor: (optional) A token identifying the next page of + results. 
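+
+        An illustrative sketch of checking for another page of results (the URL
+        and cursor below are made-up placeholders)::
+
+            page = Pagination.from_dict({
+                'refresh_url': '/v2/providers?version=2024-08-25',
+                'next_cursor': 'c3RhcnQ9MTAw',
+            })
+            if page.next_cursor:
+                print('request the next page with cursor', page.next_cursor)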
+ """ + self.refresh_url = refresh_url + self.next_url = next_url + self.total = total + self.matched = matched + self.refresh_cursor = refresh_cursor + self.next_cursor = next_cursor + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Pagination': + """Initialize a Pagination object from a json dictionary.""" + args = {} + if (refresh_url := _dict.get('refresh_url')) is not None: + args['refresh_url'] = refresh_url + else: + raise ValueError( + 'Required property \'refresh_url\' not present in Pagination JSON' + ) + if (next_url := _dict.get('next_url')) is not None: + args['next_url'] = next_url + if (total := _dict.get('total')) is not None: + args['total'] = total + if (matched := _dict.get('matched')) is not None: + args['matched'] = matched + if (refresh_cursor := _dict.get('refresh_cursor')) is not None: + args['refresh_cursor'] = refresh_cursor + if (next_cursor := _dict.get('next_cursor')) is not None: + args['next_cursor'] = next_cursor + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Pagination object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'refresh_url') and self.refresh_url is not None: + _dict['refresh_url'] = self.refresh_url + if hasattr(self, 'next_url') and self.next_url is not None: + _dict['next_url'] = self.next_url + if hasattr(self, 'total') and self.total is not None: + _dict['total'] = self.total + if hasattr(self, 'matched') and self.matched is not None: + _dict['matched'] = self.matched + if hasattr(self, 'refresh_cursor') and self.refresh_cursor is not None: + _dict['refresh_cursor'] = self.refresh_cursor + if hasattr(self, 'next_cursor') and self.next_cursor is not None: + _dict['next_cursor'] = self.next_cursor + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Pagination object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Pagination') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Pagination') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class PartialItem: + """ + Message response partial item content. + + :param str response_type: (optional) The type of response returned by the dialog + node. The specified response type must be supported by the client application or + channel. + :param str text: The text within the partial chunk of the message stream + response. + :param Metadata streaming_metadata: Contains meta-information about the item(s) + being streamed. + """ + + def __init__( + self, + text: str, + streaming_metadata: 'Metadata', + *, + response_type: Optional[str] = None, + ) -> None: + """ + Initialize a PartialItem object. + + :param str text: The text within the partial chunk of the message stream + response. + :param Metadata streaming_metadata: Contains meta-information about the + item(s) being streamed. + :param str response_type: (optional) The type of response returned by the + dialog node. The specified response type must be supported by the client + application or channel. 
+ """ + self.response_type = response_type + self.text = text + self.streaming_metadata = streaming_metadata + + @classmethod + def from_dict(cls, _dict: Dict) -> 'PartialItem': + """Initialize a PartialItem object from a json dictionary.""" + args = {} + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + if (text := _dict.get('text')) is not None: + args['text'] = text + else: + raise ValueError( + 'Required property \'text\' not present in PartialItem JSON') + if (streaming_metadata := _dict.get('streaming_metadata')) is not None: + args['streaming_metadata'] = Metadata.from_dict(streaming_metadata) + else: + raise ValueError( + 'Required property \'streaming_metadata\' not present in PartialItem JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a PartialItem object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr( + self, + 'streaming_metadata') and self.streaming_metadata is not None: + if isinstance(self.streaming_metadata, dict): + _dict['streaming_metadata'] = self.streaming_metadata + else: + _dict['streaming_metadata'] = self.streaming_metadata.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this PartialItem object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'PartialItem') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'PartialItem') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderAuthenticationOAuth2: + """ + Non-private settings for oauth2 authentication. + + :param str preferred_flow: (optional) The preferred "flow" or "grant type" for + the API client to fetch an access token from the authorization server. + :param ProviderAuthenticationOAuth2Flows flows: (optional) Scenarios performed + by the API client to fetch an access token from the authorization server. + """ + + def __init__( + self, + *, + preferred_flow: Optional[str] = None, + flows: Optional['ProviderAuthenticationOAuth2Flows'] = None, + ) -> None: + """ + Initialize a ProviderAuthenticationOAuth2 object. + + :param str preferred_flow: (optional) The preferred "flow" or "grant type" + for the API client to fetch an access token from the authorization server. + :param ProviderAuthenticationOAuth2Flows flows: (optional) Scenarios + performed by the API client to fetch an access token from the authorization + server. 
+ """ + self.preferred_flow = preferred_flow + self.flows = flows + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProviderAuthenticationOAuth2': + """Initialize a ProviderAuthenticationOAuth2 object from a json dictionary.""" + args = {} + if (preferred_flow := _dict.get('preferred_flow')) is not None: + args['preferred_flow'] = preferred_flow + if (flows := _dict.get('flows')) is not None: + args['flows'] = flows + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderAuthenticationOAuth2 object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'preferred_flow') and self.preferred_flow is not None: + _dict['preferred_flow'] = self.preferred_flow + if hasattr(self, 'flows') and self.flows is not None: + if isinstance(self.flows, dict): + _dict['flows'] = self.flows + else: + _dict['flows'] = self.flows.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderAuthenticationOAuth2 object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderAuthenticationOAuth2') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderAuthenticationOAuth2') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class PreferredFlowEnum(str, Enum): + """ + The preferred "flow" or "grant type" for the API client to fetch an access token + from the authorization server. + """ + + PASSWORD = 'password' + CLIENT_CREDENTIALS = 'client_credentials' + AUTHORIZATION_CODE = 'authorization_code' + CUSTOM_FLOW_NAME = '<$custom_flow_name>' + + +class ProviderAuthenticationOAuth2Flows: + """ + Scenarios performed by the API client to fetch an access token from the authorization + server. + + """ + + def __init__(self,) -> None: + """ + Initialize a ProviderAuthenticationOAuth2Flows object. + + """ + msg = "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format( + ", ".join([ + 'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password', + 'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials', + 'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode' + ])) + raise Exception(msg) + + +class ProviderAuthenticationOAuth2PasswordUsername: + """ + The username for oauth2 authentication when the preferred flow is "password". + + :param str type: (optional) The type of property observed in "value". + :param str value: (optional) The stored information of the value. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + value: Optional[str] = None, + ) -> None: + """ + Initialize a ProviderAuthenticationOAuth2PasswordUsername object. + + :param str type: (optional) The type of property observed in "value". + :param str value: (optional) The stored information of the value. 
+ """ + self.type = type + self.value = value + + @classmethod + def from_dict( + cls, _dict: Dict) -> 'ProviderAuthenticationOAuth2PasswordUsername': + """Initialize a ProviderAuthenticationOAuth2PasswordUsername object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (value := _dict.get('value')) is not None: + args['value'] = value + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderAuthenticationOAuth2PasswordUsername object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderAuthenticationOAuth2PasswordUsername object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'ProviderAuthenticationOAuth2PasswordUsername') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'ProviderAuthenticationOAuth2PasswordUsername') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of property observed in "value". + """ + + VALUE = 'value' + + +class ProviderAuthenticationTypeAndValue: + """ + ProviderAuthenticationTypeAndValue. + + :param str type: (optional) The type of property observed in "value". + :param str value: (optional) The stored information of the value. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + value: Optional[str] = None, + ) -> None: + """ + Initialize a ProviderAuthenticationTypeAndValue object. + + :param str type: (optional) The type of property observed in "value". + :param str value: (optional) The stored information of the value. 
+ """ + self.type = type + self.value = value + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProviderAuthenticationTypeAndValue': + """Initialize a ProviderAuthenticationTypeAndValue object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (value := _dict.get('value')) is not None: + args['value'] = value + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderAuthenticationTypeAndValue object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderAuthenticationTypeAndValue object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderAuthenticationTypeAndValue') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderAuthenticationTypeAndValue') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of property observed in "value". + """ + + VALUE = 'value' + + +class ProviderCollection: + """ + ProviderCollection. + + :param List[ProviderResponse] conversational_skill_providers: An array of + objects describing the conversational skill providers associated with the + instance. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). + """ + + def __init__( + self, + conversational_skill_providers: List['ProviderResponse'], + pagination: 'Pagination', + ) -> None: + """ + Initialize a ProviderCollection object. + + :param List[ProviderResponse] conversational_skill_providers: An array of + objects describing the conversational skill providers associated with the + instance. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). 
+ """ + self.conversational_skill_providers = conversational_skill_providers + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProviderCollection': + """Initialize a ProviderCollection object from a json dictionary.""" + args = {} + if (conversational_skill_providers := + _dict.get('conversational_skill_providers')) is not None: + args['conversational_skill_providers'] = [ + ProviderResponse.from_dict(v) + for v in conversational_skill_providers + ] + else: + raise ValueError( + 'Required property \'conversational_skill_providers\' not present in ProviderCollection JSON' + ) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in ProviderCollection JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'conversational_skill_providers' + ) and self.conversational_skill_providers is not None: + conversational_skill_providers_list = [] + for v in self.conversational_skill_providers: + if isinstance(v, dict): + conversational_skill_providers_list.append(v) + else: + conversational_skill_providers_list.append(v.to_dict()) + _dict[ + 'conversational_skill_providers'] = conversational_skill_providers_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderPrivate: + """ + Private information of the provider. + + :param ProviderPrivateAuthentication authentication: Private authentication + information of the provider. + """ + + def __init__( + self, + authentication: 'ProviderPrivateAuthentication', + ) -> None: + """ + Initialize a ProviderPrivate object. + + :param ProviderPrivateAuthentication authentication: Private authentication + information of the provider. 
+ """ + self.authentication = authentication + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProviderPrivate': + """Initialize a ProviderPrivate object from a json dictionary.""" + args = {} + if (authentication := _dict.get('authentication')) is not None: + args['authentication'] = authentication + else: + raise ValueError( + 'Required property \'authentication\' not present in ProviderPrivate JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderPrivate object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'authentication') and self.authentication is not None: + if isinstance(self.authentication, dict): + _dict['authentication'] = self.authentication + else: + _dict['authentication'] = self.authentication.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderPrivate object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderPrivate') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderPrivate') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderPrivateAuthentication: + """ + Private authentication information of the provider. + + """ + + def __init__(self,) -> None: + """ + Initialize a ProviderPrivateAuthentication object. + + """ + msg = "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format( + ", ".join([ + 'ProviderPrivateAuthenticationBearerFlow', + 'ProviderPrivateAuthenticationBasicFlow', + 'ProviderPrivateAuthenticationOAuth2Flow' + ])) + raise Exception(msg) + + +class ProviderPrivateAuthenticationOAuth2FlowFlows: + """ + Scenarios performed by the API client to fetch an access token from the authorization + server. + + """ + + def __init__(self,) -> None: + """ + Initialize a ProviderPrivateAuthenticationOAuth2FlowFlows object. + + """ + msg = "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format( + ", ".join([ + 'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password', + 'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials', + 'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode' + ])) + raise Exception(msg) + + +class ProviderPrivateAuthenticationOAuth2PasswordPassword: + """ + The password for oauth2 authentication when the preferred flow is "password". + + :param str type: (optional) The type of property observed in "value". + :param str value: (optional) The stored information of the value. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + value: Optional[str] = None, + ) -> None: + """ + Initialize a ProviderPrivateAuthenticationOAuth2PasswordPassword object. + + :param str type: (optional) The type of property observed in "value". + :param str value: (optional) The stored information of the value. 
+ """ + self.type = type + self.value = value + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'ProviderPrivateAuthenticationOAuth2PasswordPassword': + """Initialize a ProviderPrivateAuthenticationOAuth2PasswordPassword object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (value := _dict.get('value')) is not None: + args['value'] = value + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderPrivateAuthenticationOAuth2PasswordPassword object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderPrivateAuthenticationOAuth2PasswordPassword object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'ProviderPrivateAuthenticationOAuth2PasswordPassword' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: 'ProviderPrivateAuthenticationOAuth2PasswordPassword' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of property observed in "value". + """ + + VALUE = 'value' + + +class ProviderResponse: + """ + ProviderResponse. + + :param str provider_id: (optional) The unique identifier of the provider. + :param ProviderResponseSpecification specification: (optional) The specification + of the provider. + """ + + def __init__( + self, + *, + provider_id: Optional[str] = None, + specification: Optional['ProviderResponseSpecification'] = None, + ) -> None: + """ + Initialize a ProviderResponse object. + + :param str provider_id: (optional) The unique identifier of the provider. + :param ProviderResponseSpecification specification: (optional) The + specification of the provider. 
+ """ + self.provider_id = provider_id + self.specification = specification + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProviderResponse': + """Initialize a ProviderResponse object from a json dictionary.""" + args = {} + if (provider_id := _dict.get('provider_id')) is not None: + args['provider_id'] = provider_id + if (specification := _dict.get('specification')) is not None: + args['specification'] = ProviderResponseSpecification.from_dict( + specification) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'provider_id') and self.provider_id is not None: + _dict['provider_id'] = self.provider_id + if hasattr(self, 'specification') and self.specification is not None: + if isinstance(self.specification, dict): + _dict['specification'] = self.specification + else: + _dict['specification'] = self.specification.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderResponseSpecification: + """ + The specification of the provider. + + :param List[ProviderResponseSpecificationServersItem] servers: (optional) An + array of objects defining all endpoints of the provider. + **Note:** Multiple array items are reserved for future use. + :param ProviderResponseSpecificationComponents components: (optional) An object + defining various reusable definitions of the provider. + """ + + def __init__( + self, + *, + servers: Optional[ + List['ProviderResponseSpecificationServersItem']] = None, + components: Optional['ProviderResponseSpecificationComponents'] = None, + ) -> None: + """ + Initialize a ProviderResponseSpecification object. + + :param List[ProviderResponseSpecificationServersItem] servers: (optional) + An array of objects defining all endpoints of the provider. + **Note:** Multiple array items are reserved for future use. + :param ProviderResponseSpecificationComponents components: (optional) An + object defining various reusable definitions of the provider. 
+ """ + self.servers = servers + self.components = components + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProviderResponseSpecification': + """Initialize a ProviderResponseSpecification object from a json dictionary.""" + args = {} + if (servers := _dict.get('servers')) is not None: + args['servers'] = [ + ProviderResponseSpecificationServersItem.from_dict(v) + for v in servers + ] + if (components := _dict.get('components')) is not None: + args[ + 'components'] = ProviderResponseSpecificationComponents.from_dict( + components) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderResponseSpecification object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'servers') and self.servers is not None: + servers_list = [] + for v in self.servers: + if isinstance(v, dict): + servers_list.append(v) + else: + servers_list.append(v.to_dict()) + _dict['servers'] = servers_list + if hasattr(self, 'components') and self.components is not None: + if isinstance(self.components, dict): + _dict['components'] = self.components + else: + _dict['components'] = self.components.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderResponseSpecification object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderResponseSpecification') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderResponseSpecification') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderResponseSpecificationComponents: + """ + An object defining various reusable definitions of the provider. + + :param ProviderResponseSpecificationComponentsSecuritySchemes security_schemes: + (optional) The definition of the security scheme for the provider. + """ + + def __init__( + self, + *, + security_schemes: Optional[ + 'ProviderResponseSpecificationComponentsSecuritySchemes'] = None, + ) -> None: + """ + Initialize a ProviderResponseSpecificationComponents object. + + :param ProviderResponseSpecificationComponentsSecuritySchemes + security_schemes: (optional) The definition of the security scheme for the + provider. 
+ """ + self.security_schemes = security_schemes + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'ProviderResponseSpecificationComponents': + """Initialize a ProviderResponseSpecificationComponents object from a json dictionary.""" + args = {} + if (security_schemes := _dict.get('securitySchemes')) is not None: + args[ + 'security_schemes'] = ProviderResponseSpecificationComponentsSecuritySchemes.from_dict( + security_schemes) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderResponseSpecificationComponents object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'security_schemes') and self.security_schemes is not None: + if isinstance(self.security_schemes, dict): + _dict['securitySchemes'] = self.security_schemes + else: + _dict['securitySchemes'] = self.security_schemes.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderResponseSpecificationComponents object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderResponseSpecificationComponents') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderResponseSpecificationComponents') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderResponseSpecificationComponentsSecuritySchemes: + """ + The definition of the security scheme for the provider. + + :param str authentication_method: (optional) The authentication method required + for requests made from watsonx Assistant to the conversational skill provider. + :param ProviderResponseSpecificationComponentsSecuritySchemesBasic basic: + (optional) Non-private settings for basic access authentication. + :param ProviderAuthenticationOAuth2 oauth2: (optional) Non-private settings for + oauth2 authentication. + """ + + def __init__( + self, + *, + authentication_method: Optional[str] = None, + basic: Optional[ + 'ProviderResponseSpecificationComponentsSecuritySchemesBasic'] = None, + oauth2: Optional['ProviderAuthenticationOAuth2'] = None, + ) -> None: + """ + Initialize a ProviderResponseSpecificationComponentsSecuritySchemes object. + + :param str authentication_method: (optional) The authentication method + required for requests made from watsonx Assistant to the conversational + skill provider. + :param ProviderResponseSpecificationComponentsSecuritySchemesBasic basic: + (optional) Non-private settings for basic access authentication. + :param ProviderAuthenticationOAuth2 oauth2: (optional) Non-private settings + for oauth2 authentication. 
+ """ + self.authentication_method = authentication_method + self.basic = basic + self.oauth2 = oauth2 + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'ProviderResponseSpecificationComponentsSecuritySchemes': + """Initialize a ProviderResponseSpecificationComponentsSecuritySchemes object from a json dictionary.""" + args = {} + if (authentication_method := + _dict.get('authentication_method')) is not None: + args['authentication_method'] = authentication_method + if (basic := _dict.get('basic')) is not None: + args[ + 'basic'] = ProviderResponseSpecificationComponentsSecuritySchemesBasic.from_dict( + basic) + if (oauth2 := _dict.get('oauth2')) is not None: + args['oauth2'] = ProviderAuthenticationOAuth2.from_dict(oauth2) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderResponseSpecificationComponentsSecuritySchemes object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'authentication_method' + ) and self.authentication_method is not None: + _dict['authentication_method'] = self.authentication_method + if hasattr(self, 'basic') and self.basic is not None: + if isinstance(self.basic, dict): + _dict['basic'] = self.basic + else: + _dict['basic'] = self.basic.to_dict() + if hasattr(self, 'oauth2') and self.oauth2 is not None: + if isinstance(self.oauth2, dict): + _dict['oauth2'] = self.oauth2 + else: + _dict['oauth2'] = self.oauth2.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderResponseSpecificationComponentsSecuritySchemes object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'ProviderResponseSpecificationComponentsSecuritySchemes' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: 'ProviderResponseSpecificationComponentsSecuritySchemes' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class AuthenticationMethodEnum(str, Enum): + """ + The authentication method required for requests made from watsonx Assistant to the + conversational skill provider. + """ + + BASIC = 'basic' + BEARER = 'bearer' + API_KEY = 'api_key' + OAUTH2 = 'oauth2' + NONE = 'none' + + +class ProviderResponseSpecificationComponentsSecuritySchemesBasic: + """ + Non-private settings for basic access authentication. + + :param ProviderAuthenticationTypeAndValue username: (optional) The username for + basic access authentication. + """ + + def __init__( + self, + *, + username: Optional['ProviderAuthenticationTypeAndValue'] = None, + ) -> None: + """ + Initialize a ProviderResponseSpecificationComponentsSecuritySchemesBasic object. + + :param ProviderAuthenticationTypeAndValue username: (optional) The username + for basic access authentication. 
+ """ + self.username = username + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'ProviderResponseSpecificationComponentsSecuritySchemesBasic': + """Initialize a ProviderResponseSpecificationComponentsSecuritySchemesBasic object from a json dictionary.""" + args = {} + if (username := _dict.get('username')) is not None: + args['username'] = ProviderAuthenticationTypeAndValue.from_dict( + username) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderResponseSpecificationComponentsSecuritySchemesBasic object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'username') and self.username is not None: + if isinstance(self.username, dict): + _dict['username'] = self.username + else: + _dict['username'] = self.username.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderResponseSpecificationComponentsSecuritySchemesBasic object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'ProviderResponseSpecificationComponentsSecuritySchemesBasic' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, + other: 'ProviderResponseSpecificationComponentsSecuritySchemesBasic' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderResponseSpecificationServersItem: + """ + ProviderResponseSpecificationServersItem. + + :param str url: (optional) The URL of the conversational skill provider. + """ + + def __init__( + self, + *, + url: Optional[str] = None, + ) -> None: + """ + Initialize a ProviderResponseSpecificationServersItem object. + + :param str url: (optional) The URL of the conversational skill provider. 
+ """ + self.url = url + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'ProviderResponseSpecificationServersItem': + """Initialize a ProviderResponseSpecificationServersItem object from a json dictionary.""" + args = {} + if (url := _dict.get('url')) is not None: + args['url'] = url + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderResponseSpecificationServersItem object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderResponseSpecificationServersItem object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderResponseSpecificationServersItem') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderResponseSpecificationServersItem') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderSpecification: + """ + The specification of the provider. + + :param List[ProviderSpecificationServersItem] servers: An array of objects + defining all endpoints of the provider. + **Note:** Multiple array items are reserved for future use. + :param ProviderSpecificationComponents components: (optional) An object defining + various reusable definitions of the provider. + """ + + def __init__( + self, + servers: List['ProviderSpecificationServersItem'], + *, + components: Optional['ProviderSpecificationComponents'] = None, + ) -> None: + """ + Initialize a ProviderSpecification object. + + :param List[ProviderSpecificationServersItem] servers: An array of objects + defining all endpoints of the provider. + **Note:** Multiple array items are reserved for future use. + :param ProviderSpecificationComponents components: (optional) An object + defining various reusable definitions of the provider. 
+ """ + self.servers = servers + self.components = components + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProviderSpecification': + """Initialize a ProviderSpecification object from a json dictionary.""" + args = {} + if (servers := _dict.get('servers')) is not None: + args['servers'] = [ + ProviderSpecificationServersItem.from_dict(v) for v in servers + ] + else: + raise ValueError( + 'Required property \'servers\' not present in ProviderSpecification JSON' + ) + if (components := _dict.get('components')) is not None: + args['components'] = ProviderSpecificationComponents.from_dict( + components) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderSpecification object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'servers') and self.servers is not None: + servers_list = [] + for v in self.servers: + if isinstance(v, dict): + servers_list.append(v) + else: + servers_list.append(v.to_dict()) + _dict['servers'] = servers_list + if hasattr(self, 'components') and self.components is not None: + if isinstance(self.components, dict): + _dict['components'] = self.components + else: + _dict['components'] = self.components.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderSpecification object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderSpecification') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderSpecification') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderSpecificationComponents: + """ + An object defining various reusable definitions of the provider. + + :param ProviderSpecificationComponentsSecuritySchemes security_schemes: + (optional) The definition of the security scheme for the provider. + """ + + def __init__( + self, + *, + security_schemes: Optional[ + 'ProviderSpecificationComponentsSecuritySchemes'] = None, + ) -> None: + """ + Initialize a ProviderSpecificationComponents object. + + :param ProviderSpecificationComponentsSecuritySchemes security_schemes: + (optional) The definition of the security scheme for the provider. 
+ """ + self.security_schemes = security_schemes + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProviderSpecificationComponents': + """Initialize a ProviderSpecificationComponents object from a json dictionary.""" + args = {} + if (security_schemes := _dict.get('securitySchemes')) is not None: + args[ + 'security_schemes'] = ProviderSpecificationComponentsSecuritySchemes.from_dict( + security_schemes) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderSpecificationComponents object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'security_schemes') and self.security_schemes is not None: + if isinstance(self.security_schemes, dict): + _dict['securitySchemes'] = self.security_schemes + else: + _dict['securitySchemes'] = self.security_schemes.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderSpecificationComponents object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderSpecificationComponents') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderSpecificationComponents') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderSpecificationComponentsSecuritySchemes: + """ + The definition of the security scheme for the provider. + + :param str authentication_method: (optional) The authentication method required + for requests made from watsonx Assistant to the conversational skill provider. + :param ProviderSpecificationComponentsSecuritySchemesBasic basic: (optional) + Non-private settings for basic access authentication. + :param ProviderAuthenticationOAuth2 oauth2: (optional) Non-private settings for + oauth2 authentication. + """ + + def __init__( + self, + *, + authentication_method: Optional[str] = None, + basic: Optional[ + 'ProviderSpecificationComponentsSecuritySchemesBasic'] = None, + oauth2: Optional['ProviderAuthenticationOAuth2'] = None, + ) -> None: + """ + Initialize a ProviderSpecificationComponentsSecuritySchemes object. + + :param str authentication_method: (optional) The authentication method + required for requests made from watsonx Assistant to the conversational + skill provider. + :param ProviderSpecificationComponentsSecuritySchemesBasic basic: + (optional) Non-private settings for basic access authentication. + :param ProviderAuthenticationOAuth2 oauth2: (optional) Non-private settings + for oauth2 authentication. 
+ """ + self.authentication_method = authentication_method + self.basic = basic + self.oauth2 = oauth2 + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'ProviderSpecificationComponentsSecuritySchemes': + """Initialize a ProviderSpecificationComponentsSecuritySchemes object from a json dictionary.""" + args = {} + if (authentication_method := + _dict.get('authentication_method')) is not None: + args['authentication_method'] = authentication_method + if (basic := _dict.get('basic')) is not None: + args[ + 'basic'] = ProviderSpecificationComponentsSecuritySchemesBasic.from_dict( + basic) + if (oauth2 := _dict.get('oauth2')) is not None: + args['oauth2'] = ProviderAuthenticationOAuth2.from_dict(oauth2) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderSpecificationComponentsSecuritySchemes object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'authentication_method' + ) and self.authentication_method is not None: + _dict['authentication_method'] = self.authentication_method + if hasattr(self, 'basic') and self.basic is not None: + if isinstance(self.basic, dict): + _dict['basic'] = self.basic + else: + _dict['basic'] = self.basic.to_dict() + if hasattr(self, 'oauth2') and self.oauth2 is not None: + if isinstance(self.oauth2, dict): + _dict['oauth2'] = self.oauth2 + else: + _dict['oauth2'] = self.oauth2.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderSpecificationComponentsSecuritySchemes object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'ProviderSpecificationComponentsSecuritySchemes') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'ProviderSpecificationComponentsSecuritySchemes') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class AuthenticationMethodEnum(str, Enum): + """ + The authentication method required for requests made from watsonx Assistant to the + conversational skill provider. + """ + + BASIC = 'basic' + BEARER = 'bearer' + API_KEY = 'api_key' + OAUTH2 = 'oauth2' + NONE = 'none' + + +class ProviderSpecificationComponentsSecuritySchemesBasic: + """ + Non-private settings for basic access authentication. + + :param ProviderAuthenticationTypeAndValue username: (optional) The username for + basic access authentication. + """ + + def __init__( + self, + *, + username: Optional['ProviderAuthenticationTypeAndValue'] = None, + ) -> None: + """ + Initialize a ProviderSpecificationComponentsSecuritySchemesBasic object. + + :param ProviderAuthenticationTypeAndValue username: (optional) The username + for basic access authentication. 
+ """ + self.username = username + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'ProviderSpecificationComponentsSecuritySchemesBasic': + """Initialize a ProviderSpecificationComponentsSecuritySchemesBasic object from a json dictionary.""" + args = {} + if (username := _dict.get('username')) is not None: + args['username'] = ProviderAuthenticationTypeAndValue.from_dict( + username) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderSpecificationComponentsSecuritySchemesBasic object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'username') and self.username is not None: + if isinstance(self.username, dict): + _dict['username'] = self.username + else: + _dict['username'] = self.username.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderSpecificationComponentsSecuritySchemesBasic object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'ProviderSpecificationComponentsSecuritySchemesBasic' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: 'ProviderSpecificationComponentsSecuritySchemesBasic' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderSpecificationServersItem: + """ + ProviderSpecificationServersItem. + + :param str url: (optional) The URL of the conversational skill provider. + """ + + def __init__( + self, + *, + url: Optional[str] = None, + ) -> None: + """ + Initialize a ProviderSpecificationServersItem object. + + :param str url: (optional) The URL of the conversational skill provider. + """ + self.url = url + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProviderSpecificationServersItem': + """Initialize a ProviderSpecificationServersItem object from a json dictionary.""" + args = {} + if (url := _dict.get('url')) is not None: + args['url'] = url + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderSpecificationServersItem object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderSpecificationServersItem object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderSpecificationServersItem') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderSpecificationServersItem') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Release: + """ + Release. + + :param str release: (optional) The name of the release. The name is the version + number (an integer), returned as a string. 
+ :param str description: (optional) The description of the release. + :param List[EnvironmentReference] environment_references: (optional) An array of + objects describing the environments where this release has been deployed. + :param ReleaseContent content: (optional) An object identifying the versionable + content objects (such as skill snapshots) that are included in the release. + :param str status: (optional) The current status of the release: + - **Available**: The release is available for deployment. + - **Failed**: An asynchronous publish operation has failed. + - **Processing**: An asynchronous publish operation has not yet completed. + :param datetime created: (optional) The timestamp for creation of the object. + :param datetime updated: (optional) The timestamp for the most recent update to + the object. + """ + + def __init__( + self, + *, + release: Optional[str] = None, + description: Optional[str] = None, + environment_references: Optional[List['EnvironmentReference']] = None, + content: Optional['ReleaseContent'] = None, + status: Optional[str] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: + """ + Initialize a Release object. + + :param str description: (optional) The description of the release. + """ + self.release = release + self.description = description + self.environment_references = environment_references + self.content = content + self.status = status + self.created = created + self.updated = updated + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Release': + """Initialize a Release object from a json dictionary.""" + args = {} + if (release := _dict.get('release')) is not None: + args['release'] = release + if (description := _dict.get('description')) is not None: + args['description'] = description + if (environment_references := + _dict.get('environment_references')) is not None: + args['environment_references'] = [ + EnvironmentReference.from_dict(v) + for v in environment_references + ] + if (content := _dict.get('content')) is not None: + args['content'] = ReleaseContent.from_dict(content) + if (status := _dict.get('status')) is not None: + args['status'] = status + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Release object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'release') and getattr(self, 'release') is not None: + _dict['release'] = getattr(self, 'release') + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'environment_references') and getattr( + self, 'environment_references') is not None: + environment_references_list = [] + for v in getattr(self, 'environment_references'): + if isinstance(v, dict): + environment_references_list.append(v) + else: + environment_references_list.append(v.to_dict()) + _dict['environment_references'] = environment_references_list + if hasattr(self, 'content') and getattr(self, 'content') is not None: + if isinstance(getattr(self, 'content'), dict): + _dict['content'] = getattr(self, 'content') + else: + _dict['content'] = getattr(self, 'content').to_dict() + if hasattr(self, 'status') and getattr(self, 'status') is not 
None: + _dict['status'] = getattr(self, 'status') + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Release object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Release') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Release') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The current status of the release: + - **Available**: The release is available for deployment. + - **Failed**: An asynchronous publish operation has failed. + - **Processing**: An asynchronous publish operation has not yet completed. + """ + + AVAILABLE = 'Available' + FAILED = 'Failed' + PROCESSING = 'Processing' + + +class ReleaseCollection: + """ + ReleaseCollection. + + :param List[Release] releases: An array of objects describing the releases + associated with an assistant. + :param Pagination pagination: The pagination data for the returned objects. For + more information about using pagination, see [Pagination](#pagination). + """ + + def __init__( + self, + releases: List['Release'], + pagination: 'Pagination', + ) -> None: + """ + Initialize a ReleaseCollection object. + + :param List[Release] releases: An array of objects describing the releases + associated with an assistant. + :param Pagination pagination: The pagination data for the returned objects. + For more information about using pagination, see [Pagination](#pagination). 
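+
+        A usage sketch, assuming ``collection`` was built with
+        ``ReleaseCollection.from_dict(...)`` from a list-releases response:
+
+            available = [
+                r.release for r in collection.releases if r.status == 'Available'
+            ]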
+ """ + self.releases = releases + self.pagination = pagination + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ReleaseCollection': + """Initialize a ReleaseCollection object from a json dictionary.""" + args = {} + if (releases := _dict.get('releases')) is not None: + args['releases'] = [Release.from_dict(v) for v in releases] + else: + raise ValueError( + 'Required property \'releases\' not present in ReleaseCollection JSON' + ) + if (pagination := _dict.get('pagination')) is not None: + args['pagination'] = Pagination.from_dict(pagination) + else: + raise ValueError( + 'Required property \'pagination\' not present in ReleaseCollection JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ReleaseCollection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'releases') and self.releases is not None: + releases_list = [] + for v in self.releases: + if isinstance(v, dict): + releases_list.append(v) + else: + releases_list.append(v.to_dict()) + _dict['releases'] = releases_list + if hasattr(self, 'pagination') and self.pagination is not None: + if isinstance(self.pagination, dict): + _dict['pagination'] = self.pagination + else: + _dict['pagination'] = self.pagination.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ReleaseCollection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ReleaseCollection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ReleaseCollection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ReleaseContent: + """ + An object identifying the versionable content objects (such as skill snapshots) that + are included in the release. + + :param List[ReleaseSkill] skills: (optional) The skill snapshots that are + included in the release. + """ + + def __init__( + self, + *, + skills: Optional[List['ReleaseSkill']] = None, + ) -> None: + """ + Initialize a ReleaseContent object. 
+ + """ + self.skills = skills + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ReleaseContent': + """Initialize a ReleaseContent object from a json dictionary.""" + args = {} + if (skills := _dict.get('skills')) is not None: + args['skills'] = [ReleaseSkill.from_dict(v) for v in skills] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ReleaseContent object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'skills') and getattr(self, 'skills') is not None: + skills_list = [] + for v in getattr(self, 'skills'): + if isinstance(v, dict): + skills_list.append(v) + else: + skills_list.append(v.to_dict()) + _dict['skills'] = skills_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ReleaseContent object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ReleaseContent') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ReleaseContent') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ReleaseSkill: + """ + ReleaseSkill. + + :param str skill_id: The skill ID of the skill. + :param str type: (optional) The type of the skill. + :param str snapshot: (optional) The name of the skill snapshot that is saved as + part of the release (for example, `draft` or `1`). + """ + + def __init__( + self, + skill_id: str, + *, + type: Optional[str] = None, + snapshot: Optional[str] = None, + ) -> None: + """ + Initialize a ReleaseSkill object. + + :param str skill_id: The skill ID of the skill. + :param str type: (optional) The type of the skill. + :param str snapshot: (optional) The name of the skill snapshot that is + saved as part of the release (for example, `draft` or `1`). 
+ """ + self.skill_id = skill_id + self.type = type + self.snapshot = snapshot + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ReleaseSkill': + """Initialize a ReleaseSkill object from a json dictionary.""" + args = {} + if (skill_id := _dict.get('skill_id')) is not None: + args['skill_id'] = skill_id + else: + raise ValueError( + 'Required property \'skill_id\' not present in ReleaseSkill JSON' + ) + if (type := _dict.get('type')) is not None: + args['type'] = type + if (snapshot := _dict.get('snapshot')) is not None: + args['snapshot'] = snapshot + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ReleaseSkill object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'skill_id') and self.skill_id is not None: + _dict['skill_id'] = self.skill_id + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'snapshot') and self.snapshot is not None: + _dict['snapshot'] = self.snapshot + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ReleaseSkill object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ReleaseSkill') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ReleaseSkill') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of the skill. + """ + + DIALOG = 'dialog' + ACTION = 'action' + SEARCH = 'search' + + +class RequestAnalytics: + """ + An optional object containing analytics data. Currently, this data is used only for + events sent to the Segment extension. + + :param str browser: (optional) The browser that was used to send the message + that triggered the event. + :param str device: (optional) The type of device that was used to send the + message that triggered the event. + :param str page_url: (optional) The URL of the web page that was used to send + the message that triggered the event. + """ + + def __init__( + self, + *, + browser: Optional[str] = None, + device: Optional[str] = None, + page_url: Optional[str] = None, + ) -> None: + """ + Initialize a RequestAnalytics object. + + :param str browser: (optional) The browser that was used to send the + message that triggered the event. + :param str device: (optional) The type of device that was used to send the + message that triggered the event. + :param str page_url: (optional) The URL of the web page that was used to + send the message that triggered the event. 
+ """ + self.browser = browser + self.device = device + self.page_url = page_url + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RequestAnalytics': + """Initialize a RequestAnalytics object from a json dictionary.""" + args = {} + if (browser := _dict.get('browser')) is not None: + args['browser'] = browser + if (device := _dict.get('device')) is not None: + args['device'] = device + if (page_url := _dict.get('pageUrl')) is not None: + args['page_url'] = page_url + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RequestAnalytics object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'browser') and self.browser is not None: + _dict['browser'] = self.browser + if hasattr(self, 'device') and self.device is not None: + _dict['device'] = self.device + if hasattr(self, 'page_url') and self.page_url is not None: + _dict['pageUrl'] = self.page_url + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RequestAnalytics object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RequestAnalytics') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RequestAnalytics') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ResponseGenericChannel: + """ + ResponseGenericChannel. + + :param str channel: (optional) A channel for which the response is intended. + """ + + def __init__( + self, + *, + channel: Optional[str] = None, + ) -> None: + """ + Initialize a ResponseGenericChannel object. + + :param str channel: (optional) A channel for which the response is + intended. + """ + self.channel = channel + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ResponseGenericChannel': + """Initialize a ResponseGenericChannel object from a json dictionary.""" + args = {} + if (channel := _dict.get('channel')) is not None: + args['channel'] = channel + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ResponseGenericChannel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'channel') and self.channel is not None: + _dict['channel'] = self.channel + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ResponseGenericChannel object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ResponseGenericChannel') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ResponseGenericChannel') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ResponseGenericCitation: + """ + ResponseGenericCitation. + + :param str title: The title of the citation text. + :param str text: The text of the citation. 
+ :param str body: The body content of the citation. + :param int search_result_index: (optional) The index of the search_result where + the citation is generated. + :param List[ResponseGenericCitationRangesItem] ranges: The offsets of the start + and end of the citation in the generated response. For example, `ranges:[ { + start:0, end:5 }, ...]`. + """ + + def __init__( + self, + title: str, + text: str, + body: str, + ranges: List['ResponseGenericCitationRangesItem'], + *, + search_result_index: Optional[int] = None, + ) -> None: + """ + Initialize a ResponseGenericCitation object. + + :param str title: The title of the citation text. + :param str text: The text of the citation. + :param str body: The body content of the citation. + :param List[ResponseGenericCitationRangesItem] ranges: The offsets of the + start and end of the citation in the generated response. For example, + `ranges:[ { start:0, end:5 }, ...]`. + :param int search_result_index: (optional) The index of the search_result + where the citation is generated. + """ + self.title = title + self.text = text + self.body = body + self.search_result_index = search_result_index + self.ranges = ranges + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ResponseGenericCitation': + """Initialize a ResponseGenericCitation object from a json dictionary.""" + args = {} + if (title := _dict.get('title')) is not None: + args['title'] = title + else: + raise ValueError( + 'Required property \'title\' not present in ResponseGenericCitation JSON' + ) + if (text := _dict.get('text')) is not None: + args['text'] = text + else: + raise ValueError( + 'Required property \'text\' not present in ResponseGenericCitation JSON' + ) + if (body := _dict.get('body')) is not None: + args['body'] = body + else: + raise ValueError( + 'Required property \'body\' not present in ResponseGenericCitation JSON' + ) + if (search_result_index := + _dict.get('search_result_index')) is not None: + args['search_result_index'] = search_result_index + if (ranges := _dict.get('ranges')) is not None: + args['ranges'] = [ + ResponseGenericCitationRangesItem.from_dict(v) for v in ranges + ] + else: + raise ValueError( + 'Required property \'ranges\' not present in ResponseGenericCitation JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ResponseGenericCitation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'body') and self.body is not None: + _dict['body'] = self.body + if hasattr( + self, + 'search_result_index') and self.search_result_index is not None: + _dict['search_result_index'] = self.search_result_index + if hasattr(self, 'ranges') and self.ranges is not None: + ranges_list = [] + for v in self.ranges: + if isinstance(v, dict): + ranges_list.append(v) + else: + ranges_list.append(v.to_dict()) + _dict['ranges'] = ranges_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ResponseGenericCitation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ResponseGenericCitation') -> bool: + """Return `true` when self and other are equal, false 
otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ResponseGenericCitation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ResponseGenericCitationRangesItem: + """ + ResponseGenericCitationRangesItem. + + :param int start: (optional) The offset of the start of the citation in the + generated response. + :param int end: (optional) The offset of the end of the citation in the + generated response. + """ + + def __init__( + self, + *, + start: Optional[int] = None, + end: Optional[int] = None, + ) -> None: + """ + Initialize a ResponseGenericCitationRangesItem object. + + :param int start: (optional) The offset of the start of the citation in the + generated response. + :param int end: (optional) The offset of the end of the citation in the + generated response. + """ + self.start = start + self.end = end + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ResponseGenericCitationRangesItem': + """Initialize a ResponseGenericCitationRangesItem object from a json dictionary.""" + args = {} + if (start := _dict.get('start')) is not None: + args['start'] = start + if (end := _dict.get('end')) is not None: + args['end'] = end + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ResponseGenericCitationRangesItem object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'start') and self.start is not None: + _dict['start'] = self.start + if hasattr(self, 'end') and self.end is not None: + _dict['end'] = self.end + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ResponseGenericCitationRangesItem object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ResponseGenericCitationRangesItem') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ResponseGenericCitationRangesItem') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ResponseGenericConfidenceScores: + """ + The confidence scores for determining whether to show the generated response or an “I + don't know” response. + + :param float threshold: (optional) The confidence score threshold. If either the + pre_gen or post_gen score is below this threshold, it shows an “I don't know” + response to replace the generated text. You can configure the threshold in + either the user interface or through the Update skill API. For more information, + see the [watsonx Assistant documentation]( + https://cloud.ibm.com/docs/watson-assistant?topic=watson-assistant-conversational-search#behavioral-tuning-conversational-search). + :param float pre_gen: (optional) The confidence score based on user query and + search results. + :param float post_gen: (optional) The confidence score based on user query, + search results, and the generated response. + :param float extractiveness: (optional) It indicates how extractive the + generated response is from the search results. 
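+
+    A sketch of the comparison described above, assuming ``scores`` was parsed
+    with ``ResponseGenericConfidenceScores.from_dict(...)``: the generated text
+    is shown only when both scores meet the threshold.
+
+        show_generated_text = (scores.pre_gen >= scores.threshold
+                               and scores.post_gen >= scores.threshold)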
+ """ + + def __init__( + self, + *, + threshold: Optional[float] = None, + pre_gen: Optional[float] = None, + post_gen: Optional[float] = None, + extractiveness: Optional[float] = None, + ) -> None: + """ + Initialize a ResponseGenericConfidenceScores object. + + :param float threshold: (optional) The confidence score threshold. If + either the pre_gen or post_gen score is below this threshold, it shows an + “I don't know” response to replace the generated text. You can configure + the threshold in either the user interface or through the Update skill API. + For more information, see the [watsonx Assistant documentation]( + https://cloud.ibm.com/docs/watson-assistant?topic=watson-assistant-conversational-search#behavioral-tuning-conversational-search). + :param float pre_gen: (optional) The confidence score based on user query + and search results. + :param float post_gen: (optional) The confidence score based on user query, + search results, and the generated response. + :param float extractiveness: (optional) It indicates how extractive the + generated response is from the search results. + """ + self.threshold = threshold + self.pre_gen = pre_gen + self.post_gen = post_gen + self.extractiveness = extractiveness + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ResponseGenericConfidenceScores': + """Initialize a ResponseGenericConfidenceScores object from a json dictionary.""" + args = {} + if (threshold := _dict.get('threshold')) is not None: + args['threshold'] = threshold + if (pre_gen := _dict.get('pre_gen')) is not None: + args['pre_gen'] = pre_gen + if (post_gen := _dict.get('post_gen')) is not None: + args['post_gen'] = post_gen + if (extractiveness := _dict.get('extractiveness')) is not None: + args['extractiveness'] = extractiveness + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ResponseGenericConfidenceScores object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'threshold') and self.threshold is not None: + _dict['threshold'] = self.threshold + if hasattr(self, 'pre_gen') and self.pre_gen is not None: + _dict['pre_gen'] = self.pre_gen + if hasattr(self, 'post_gen') and self.post_gen is not None: + _dict['post_gen'] = self.post_gen + if hasattr(self, 'extractiveness') and self.extractiveness is not None: + _dict['extractiveness'] = self.extractiveness + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ResponseGenericConfidenceScores object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ResponseGenericConfidenceScores') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ResponseGenericConfidenceScores') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class RuntimeEntity: + """ + The entity value that was recognized in the user input. + + :param str entity: An entity detected in the input. + :param List[int] location: (optional) An array of zero-based character offsets + that indicate where the detected entity values begin and end in the input text. 
+ :param str value: The term in the input text that was recognized as an entity + value. + :param float confidence: (optional) A decimal percentage that represents + confidence in the recognized entity. + :param List[CaptureGroup] groups: (optional) The recognized capture groups for + the entity, as defined by the entity pattern. + :param RuntimeEntityInterpretation interpretation: (optional) An object + containing detailed information about the entity recognized in the user input. + This property is included only if the new system entities are enabled for the + skill. + For more information about how the new system entities are interpreted, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-beta-system-entities). + :param List[RuntimeEntityAlternative] alternatives: (optional) An array of + possible alternative values that the user might have intended instead of the + value returned in the **value** property. This property is returned only for + `@sys-time` and `@sys-date` entities when the user's input is ambiguous. + This property is included only if the new system entities are enabled for the + skill. + :param RuntimeEntityRole role: (optional) An object describing the role played + by a system entity that is specifies the beginning or end of a range recognized + in the user input. This property is included only if the new system entities are + enabled for the skill. + :param str skill: (optional) The skill that recognized the entity value. + Currently, the only possible values are `main skill` for the dialog skill (if + enabled) and `actions skill` for the action skill. + This property is present only if the assistant has both a dialog skill and an + action skill. + """ + + def __init__( + self, + entity: str, + value: str, + *, + location: Optional[List[int]] = None, + confidence: Optional[float] = None, + groups: Optional[List['CaptureGroup']] = None, + interpretation: Optional['RuntimeEntityInterpretation'] = None, + alternatives: Optional[List['RuntimeEntityAlternative']] = None, + role: Optional['RuntimeEntityRole'] = None, + skill: Optional[str] = None, + ) -> None: + """ + Initialize a RuntimeEntity object. + + :param str entity: An entity detected in the input. + :param str value: The term in the input text that was recognized as an + entity value. + :param List[int] location: (optional) An array of zero-based character + offsets that indicate where the detected entity values begin and end in the + input text. + :param float confidence: (optional) A decimal percentage that represents + confidence in the recognized entity. + :param List[CaptureGroup] groups: (optional) The recognized capture groups + for the entity, as defined by the entity pattern. + :param RuntimeEntityInterpretation interpretation: (optional) An object + containing detailed information about the entity recognized in the user + input. This property is included only if the new system entities are + enabled for the skill. + For more information about how the new system entities are interpreted, see + the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-beta-system-entities). + :param List[RuntimeEntityAlternative] alternatives: (optional) An array of + possible alternative values that the user might have intended instead of + the value returned in the **value** property. This property is returned + only for `@sys-time` and `@sys-date` entities when the user's input is + ambiguous. 
+ This property is included only if the new system entities are enabled for + the skill. + :param RuntimeEntityRole role: (optional) An object describing the role + played by a system entity that is specifies the beginning or end of a range + recognized in the user input. This property is included only if the new + system entities are enabled for the skill. + :param str skill: (optional) The skill that recognized the entity value. + Currently, the only possible values are `main skill` for the dialog skill + (if enabled) and `actions skill` for the action skill. + This property is present only if the assistant has both a dialog skill and + an action skill. + """ + self.entity = entity + self.location = location + self.value = value + self.confidence = confidence + self.groups = groups + self.interpretation = interpretation + self.alternatives = alternatives + self.role = role + self.skill = skill + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeEntity': + """Initialize a RuntimeEntity object from a json dictionary.""" + args = {} + if (entity := _dict.get('entity')) is not None: + args['entity'] = entity + else: + raise ValueError( + 'Required property \'entity\' not present in RuntimeEntity JSON' + ) + if (location := _dict.get('location')) is not None: + args['location'] = location + if (value := _dict.get('value')) is not None: + args['value'] = value + else: + raise ValueError( + 'Required property \'value\' not present in RuntimeEntity JSON') + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + if (groups := _dict.get('groups')) is not None: + args['groups'] = [CaptureGroup.from_dict(v) for v in groups] + if (interpretation := _dict.get('interpretation')) is not None: + args['interpretation'] = RuntimeEntityInterpretation.from_dict( + interpretation) + if (alternatives := _dict.get('alternatives')) is not None: + args['alternatives'] = [ + RuntimeEntityAlternative.from_dict(v) for v in alternatives + ] + if (role := _dict.get('role')) is not None: + args['role'] = RuntimeEntityRole.from_dict(role) + if (skill := _dict.get('skill')) is not None: + args['skill'] = skill + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeEntity object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'entity') and self.entity is not None: + _dict['entity'] = self.entity + if hasattr(self, 'location') and self.location is not None: + _dict['location'] = self.location + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + if hasattr(self, 'groups') and self.groups is not None: + groups_list = [] + for v in self.groups: + if isinstance(v, dict): + groups_list.append(v) + else: + groups_list.append(v.to_dict()) + _dict['groups'] = groups_list + if hasattr(self, 'interpretation') and self.interpretation is not None: + if isinstance(self.interpretation, dict): + _dict['interpretation'] = self.interpretation + else: + _dict['interpretation'] = self.interpretation.to_dict() + if hasattr(self, 'alternatives') and self.alternatives is not None: + alternatives_list = [] + for v in self.alternatives: + if isinstance(v, dict): + alternatives_list.append(v) + else: + alternatives_list.append(v.to_dict()) + _dict['alternatives'] = alternatives_list + if 
hasattr(self, 'role') and self.role is not None: + if isinstance(self.role, dict): + _dict['role'] = self.role + else: + _dict['role'] = self.role.to_dict() + if hasattr(self, 'skill') and self.skill is not None: + _dict['skill'] = self.skill + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeEntity object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RuntimeEntity') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RuntimeEntity') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class RuntimeEntityAlternative: + """ + An alternative value for the recognized entity. + + :param str value: (optional) The entity value that was recognized in the user + input. + :param float confidence: (optional) A decimal percentage that represents + confidence in the recognized entity. + """ + + def __init__( + self, + *, + value: Optional[str] = None, + confidence: Optional[float] = None, + ) -> None: + """ + Initialize a RuntimeEntityAlternative object. + + :param str value: (optional) The entity value that was recognized in the + user input. + :param float confidence: (optional) A decimal percentage that represents + confidence in the recognized entity. + """ + self.value = value + self.confidence = confidence + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeEntityAlternative': + """Initialize a RuntimeEntityAlternative object from a json dictionary.""" + args = {} + if (value := _dict.get('value')) is not None: + args['value'] = value + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeEntityAlternative object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeEntityAlternative object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RuntimeEntityAlternative') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RuntimeEntityAlternative') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class RuntimeEntityInterpretation: + """ + RuntimeEntityInterpretation. + + :param str calendar_type: (optional) The calendar used to represent a recognized + date (for example, `Gregorian`). + :param str datetime_link: (optional) A unique identifier used to associate a + recognized time and date. 
If the user input contains a date and time that are + mentioned together (for example, `Today at 5`), the same **datetime_link** value + is returned for both the `@sys-date` and `@sys-time` entities. + :param str festival: (optional) A locale-specific holiday name (such as + `thanksgiving` or `christmas`). This property is included when a `@sys-date` + entity is recognized based on a holiday name in the user input. + :param str granularity: (optional) The precision or duration of a time range + specified by a recognized `@sys-time` or `@sys-date` entity. + :param str range_link: (optional) A unique identifier used to associate multiple + recognized `@sys-date`, `@sys-time`, or `@sys-number` entities that are + recognized as a range of values in the user's input (for example, `from July 4 + until July 14` or `from 20 to 25`). + :param str range_modifier: (optional) The word in the user input that indicates + that a `sys-date` or `sys-time` entity is part of an implied range where only + one date or time is specified (for example, `since` or `until`). + :param float relative_day: (optional) A recognized mention of a relative day, + represented numerically as an offset from the current date (for example, `-1` + for `yesterday` or `10` for `in ten days`). + :param float relative_month: (optional) A recognized mention of a relative + month, represented numerically as an offset from the current month (for example, + `1` for `next month` or `-3` for `three months ago`). + :param float relative_week: (optional) A recognized mention of a relative week, + represented numerically as an offset from the current week (for example, `2` for + `in two weeks` or `-1` for `last week`). + :param float relative_weekend: (optional) A recognized mention of a relative + date range for a weekend, represented numerically as an offset from the current + weekend (for example, `0` for `this weekend` or `-1` for `last weekend`). + :param float relative_year: (optional) A recognized mention of a relative year, + represented numerically as an offset from the current year (for example, `1` for + `next year` or `-5` for `five years ago`). + :param float specific_day: (optional) A recognized mention of a specific date, + represented numerically as the date within the month (for example, `30` for + `June 30`). + :param str specific_day_of_week: (optional) A recognized mention of a specific + day of the week as a lowercase string (for example, `monday`). + :param float specific_month: (optional) A recognized mention of a specific + month, represented numerically (for example, `7` for `July`). + :param float specific_quarter: (optional) A recognized mention of a specific + quarter, represented numerically (for example, `3` for `the third quarter`). + :param float specific_year: (optional) A recognized mention of a specific year + (for example, `2016`). + :param float numeric_value: (optional) A recognized numeric value, represented + as an integer or double. + :param str subtype: (optional) The type of numeric value recognized in the user + input (`integer` or `rational`). + :param str part_of_day: (optional) A recognized term for a time that was + mentioned as a part of the day in the user's input (for example, `morning` or + `afternoon`). + :param float relative_hour: (optional) A recognized mention of a relative hour, + represented numerically as an offset from the current hour (for example, `3` for + `in three hours` or `-1` for `an hour ago`).
+ :param float relative_minute: (optional) A recognized mention of a relative + time, represented numerically as an offset in minutes from the current time (for + example, `5` for `in five minutes` or `-15` for `fifteen minutes ago`). + :param float relative_second: (optional) A recognized mention of a relative + time, represented numerically as an offset in seconds from the current time (for + example, `10` for `in ten seconds` or `-30` for `thirty seconds ago`). + :param float specific_hour: (optional) A recognized specific hour mentioned as + part of a time value (for example, `10` for `10:15 AM`.). + :param float specific_minute: (optional) A recognized specific minute mentioned + as part of a time value (for example, `15` for `10:15 AM`.). + :param float specific_second: (optional) A recognized specific second mentioned + as part of a time value (for example, `30` for `10:15:30 AM`.). + :param str timezone: (optional) A recognized time zone mentioned as part of a + time value (for example, `EST`). + """ + + def __init__( + self, + *, + calendar_type: Optional[str] = None, + datetime_link: Optional[str] = None, + festival: Optional[str] = None, + granularity: Optional[str] = None, + range_link: Optional[str] = None, + range_modifier: Optional[str] = None, + relative_day: Optional[float] = None, + relative_month: Optional[float] = None, + relative_week: Optional[float] = None, + relative_weekend: Optional[float] = None, + relative_year: Optional[float] = None, + specific_day: Optional[float] = None, + specific_day_of_week: Optional[str] = None, + specific_month: Optional[float] = None, + specific_quarter: Optional[float] = None, + specific_year: Optional[float] = None, + numeric_value: Optional[float] = None, + subtype: Optional[str] = None, + part_of_day: Optional[str] = None, + relative_hour: Optional[float] = None, + relative_minute: Optional[float] = None, + relative_second: Optional[float] = None, + specific_hour: Optional[float] = None, + specific_minute: Optional[float] = None, + specific_second: Optional[float] = None, + timezone: Optional[str] = None, + ) -> None: + """ + Initialize a RuntimeEntityInterpretation object. + + :param str calendar_type: (optional) The calendar used to represent a + recognized date (for example, `Gregorian`). + :param str datetime_link: (optional) A unique identifier used to associate + a recognized time and date. If the user input contains a date and time that + are mentioned together (for example, `Today at 5`, the same + **datetime_link** value is returned for both the `@sys-date` and + `@sys-time` entities). + :param str festival: (optional) A locale-specific holiday name (such as + `thanksgiving` or `christmas`). This property is included when a + `@sys-date` entity is recognized based on a holiday name in the user input. + :param str granularity: (optional) The precision or duration of a time + range specified by a recognized `@sys-time` or `@sys-date` entity. + :param str range_link: (optional) A unique identifier used to associate + multiple recognized `@sys-date`, `@sys-time`, or `@sys-number` entities + that are recognized as a range of values in the user's input (for example, + `from July 4 until July 14` or `from 20 to 25`). + :param str range_modifier: (optional) The word in the user input that + indicates that a `sys-date` or `sys-time` entity is part of an implied + range where only one date or time is specified (for example, `since` or + `until`). 
+ :param float relative_day: (optional) A recognized mention of a relative + day, represented numerically as an offset from the current date (for + example, `-1` for `yesterday` or `10` for `in ten days`). + :param float relative_month: (optional) A recognized mention of a relative + month, represented numerically as an offset from the current month (for + example, `1` for `next month` or `-3` for `three months ago`). + :param float relative_week: (optional) A recognized mention of a relative + week, represented numerically as an offset from the current week (for + example, `2` for `in two weeks` or `-1` for `last week). + :param float relative_weekend: (optional) A recognized mention of a + relative date range for a weekend, represented numerically as an offset + from the current weekend (for example, `0` for `this weekend` or `-1` for + `last weekend`). + :param float relative_year: (optional) A recognized mention of a relative + year, represented numerically as an offset from the current year (for + example, `1` for `next year` or `-5` for `five years ago`). + :param float specific_day: (optional) A recognized mention of a specific + date, represented numerically as the date within the month (for example, + `30` for `June 30`.). + :param str specific_day_of_week: (optional) A recognized mention of a + specific day of the week as a lowercase string (for example, `monday`). + :param float specific_month: (optional) A recognized mention of a specific + month, represented numerically (for example, `7` for `July`). + :param float specific_quarter: (optional) A recognized mention of a + specific quarter, represented numerically (for example, `3` for `the third + quarter`). + :param float specific_year: (optional) A recognized mention of a specific + year (for example, `2016`). + :param float numeric_value: (optional) A recognized numeric value, + represented as an integer or double. + :param str subtype: (optional) The type of numeric value recognized in the + user input (`integer` or `rational`). + :param str part_of_day: (optional) A recognized term for a time that was + mentioned as a part of the day in the user's input (for example, `morning` + or `afternoon`). + :param float relative_hour: (optional) A recognized mention of a relative + hour, represented numerically as an offset from the current hour (for + example, `3` for `in three hours` or `-1` for `an hour ago`). + :param float relative_minute: (optional) A recognized mention of a relative + time, represented numerically as an offset in minutes from the current time + (for example, `5` for `in five minutes` or `-15` for `fifteen minutes + ago`). + :param float relative_second: (optional) A recognized mention of a relative + time, represented numerically as an offset in seconds from the current time + (for example, `10` for `in ten seconds` or `-30` for `thirty seconds ago`). + :param float specific_hour: (optional) A recognized specific hour mentioned + as part of a time value (for example, `10` for `10:15 AM`.). + :param float specific_minute: (optional) A recognized specific minute + mentioned as part of a time value (for example, `15` for `10:15 AM`.). + :param float specific_second: (optional) A recognized specific second + mentioned as part of a time value (for example, `30` for `10:15:30 AM`.). + :param str timezone: (optional) A recognized time zone mentioned as part of + a time value (for example, `EST`). 
+ """ + self.calendar_type = calendar_type + self.datetime_link = datetime_link + self.festival = festival + self.granularity = granularity + self.range_link = range_link + self.range_modifier = range_modifier + self.relative_day = relative_day + self.relative_month = relative_month + self.relative_week = relative_week + self.relative_weekend = relative_weekend + self.relative_year = relative_year + self.specific_day = specific_day + self.specific_day_of_week = specific_day_of_week + self.specific_month = specific_month + self.specific_quarter = specific_quarter + self.specific_year = specific_year + self.numeric_value = numeric_value + self.subtype = subtype + self.part_of_day = part_of_day + self.relative_hour = relative_hour + self.relative_minute = relative_minute + self.relative_second = relative_second + self.specific_hour = specific_hour + self.specific_minute = specific_minute + self.specific_second = specific_second + self.timezone = timezone + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeEntityInterpretation': + """Initialize a RuntimeEntityInterpretation object from a json dictionary.""" + args = {} + if (calendar_type := _dict.get('calendar_type')) is not None: + args['calendar_type'] = calendar_type + if (datetime_link := _dict.get('datetime_link')) is not None: + args['datetime_link'] = datetime_link + if (festival := _dict.get('festival')) is not None: + args['festival'] = festival + if (granularity := _dict.get('granularity')) is not None: + args['granularity'] = granularity + if (range_link := _dict.get('range_link')) is not None: + args['range_link'] = range_link + if (range_modifier := _dict.get('range_modifier')) is not None: + args['range_modifier'] = range_modifier + if (relative_day := _dict.get('relative_day')) is not None: + args['relative_day'] = relative_day + if (relative_month := _dict.get('relative_month')) is not None: + args['relative_month'] = relative_month + if (relative_week := _dict.get('relative_week')) is not None: + args['relative_week'] = relative_week + if (relative_weekend := _dict.get('relative_weekend')) is not None: + args['relative_weekend'] = relative_weekend + if (relative_year := _dict.get('relative_year')) is not None: + args['relative_year'] = relative_year + if (specific_day := _dict.get('specific_day')) is not None: + args['specific_day'] = specific_day + if (specific_day_of_week := + _dict.get('specific_day_of_week')) is not None: + args['specific_day_of_week'] = specific_day_of_week + if (specific_month := _dict.get('specific_month')) is not None: + args['specific_month'] = specific_month + if (specific_quarter := _dict.get('specific_quarter')) is not None: + args['specific_quarter'] = specific_quarter + if (specific_year := _dict.get('specific_year')) is not None: + args['specific_year'] = specific_year + if (numeric_value := _dict.get('numeric_value')) is not None: + args['numeric_value'] = numeric_value + if (subtype := _dict.get('subtype')) is not None: + args['subtype'] = subtype + if (part_of_day := _dict.get('part_of_day')) is not None: + args['part_of_day'] = part_of_day + if (relative_hour := _dict.get('relative_hour')) is not None: + args['relative_hour'] = relative_hour + if (relative_minute := _dict.get('relative_minute')) is not None: + args['relative_minute'] = relative_minute + if (relative_second := _dict.get('relative_second')) is not None: + args['relative_second'] = relative_second + if (specific_hour := _dict.get('specific_hour')) is not None: + args['specific_hour'] = specific_hour + if 
(specific_minute := _dict.get('specific_minute')) is not None: + args['specific_minute'] = specific_minute + if (specific_second := _dict.get('specific_second')) is not None: + args['specific_second'] = specific_second + if (timezone := _dict.get('timezone')) is not None: + args['timezone'] = timezone + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeEntityInterpretation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'calendar_type') and self.calendar_type is not None: + _dict['calendar_type'] = self.calendar_type + if hasattr(self, 'datetime_link') and self.datetime_link is not None: + _dict['datetime_link'] = self.datetime_link + if hasattr(self, 'festival') and self.festival is not None: + _dict['festival'] = self.festival + if hasattr(self, 'granularity') and self.granularity is not None: + _dict['granularity'] = self.granularity + if hasattr(self, 'range_link') and self.range_link is not None: + _dict['range_link'] = self.range_link + if hasattr(self, 'range_modifier') and self.range_modifier is not None: + _dict['range_modifier'] = self.range_modifier + if hasattr(self, 'relative_day') and self.relative_day is not None: + _dict['relative_day'] = self.relative_day + if hasattr(self, 'relative_month') and self.relative_month is not None: + _dict['relative_month'] = self.relative_month + if hasattr(self, 'relative_week') and self.relative_week is not None: + _dict['relative_week'] = self.relative_week + if hasattr(self, + 'relative_weekend') and self.relative_weekend is not None: + _dict['relative_weekend'] = self.relative_weekend + if hasattr(self, 'relative_year') and self.relative_year is not None: + _dict['relative_year'] = self.relative_year + if hasattr(self, 'specific_day') and self.specific_day is not None: + _dict['specific_day'] = self.specific_day + if hasattr(self, 'specific_day_of_week' + ) and self.specific_day_of_week is not None: + _dict['specific_day_of_week'] = self.specific_day_of_week + if hasattr(self, 'specific_month') and self.specific_month is not None: + _dict['specific_month'] = self.specific_month + if hasattr(self, + 'specific_quarter') and self.specific_quarter is not None: + _dict['specific_quarter'] = self.specific_quarter + if hasattr(self, 'specific_year') and self.specific_year is not None: + _dict['specific_year'] = self.specific_year + if hasattr(self, 'numeric_value') and self.numeric_value is not None: + _dict['numeric_value'] = self.numeric_value + if hasattr(self, 'subtype') and self.subtype is not None: + _dict['subtype'] = self.subtype + if hasattr(self, 'part_of_day') and self.part_of_day is not None: + _dict['part_of_day'] = self.part_of_day + if hasattr(self, 'relative_hour') and self.relative_hour is not None: + _dict['relative_hour'] = self.relative_hour + if hasattr(self, + 'relative_minute') and self.relative_minute is not None: + _dict['relative_minute'] = self.relative_minute + if hasattr(self, + 'relative_second') and self.relative_second is not None: + _dict['relative_second'] = self.relative_second + if hasattr(self, 'specific_hour') and self.specific_hour is not None: + _dict['specific_hour'] = self.specific_hour + if hasattr(self, + 'specific_minute') and self.specific_minute is not None: + _dict['specific_minute'] = self.specific_minute + if hasattr(self, + 'specific_second') and self.specific_second is not None: + _dict['specific_second'] = 
self.specific_second + if hasattr(self, 'timezone') and self.timezone is not None: + _dict['timezone'] = self.timezone + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeEntityInterpretation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RuntimeEntityInterpretation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RuntimeEntityInterpretation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class GranularityEnum(str, Enum): + """ + The precision or duration of a time range specified by a recognized `@sys-time` or + `@sys-date` entity. + """ + + DAY = 'day' + FORTNIGHT = 'fortnight' + HOUR = 'hour' + INSTANT = 'instant' + MINUTE = 'minute' + MONTH = 'month' + QUARTER = 'quarter' + SECOND = 'second' + WEEK = 'week' + WEEKEND = 'weekend' + YEAR = 'year' + + +class RuntimeEntityRole: + """ + An object describing the role played by a system entity that is specifies the + beginning or end of a range recognized in the user input. This property is included + only if the new system entities are enabled for the skill. + + :param str type: (optional) The relationship of the entity to the range. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + ) -> None: + """ + Initialize a RuntimeEntityRole object. + + :param str type: (optional) The relationship of the entity to the range. + """ + self.type = type + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeEntityRole': + """Initialize a RuntimeEntityRole object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeEntityRole object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeEntityRole object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RuntimeEntityRole') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RuntimeEntityRole') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The relationship of the entity to the range. + """ + + DATE_FROM = 'date_from' + DATE_TO = 'date_to' + NUMBER_FROM = 'number_from' + NUMBER_TO = 'number_to' + TIME_FROM = 'time_from' + TIME_TO = 'time_to' + + +class RuntimeIntent: + """ + An intent identified in the user input. + + :param str intent: The name of the recognized intent. + :param float confidence: (optional) A decimal percentage that represents + confidence in the intent. 
If you are specifying an intent as part of a request, + but you do not have a calculated confidence value, specify `1`. + :param str skill: (optional) The skill that identified the intent. Currently, + the only possible values are `main skill` for the dialog skill (if enabled) and + `actions skill` for the action skill. + This property is present only if the assistant has both a dialog skill and an + action skill. + """ + + def __init__( + self, + intent: str, + *, + confidence: Optional[float] = None, + skill: Optional[str] = None, + ) -> None: + """ + Initialize a RuntimeIntent object. + + :param str intent: The name of the recognized intent. + :param float confidence: (optional) A decimal percentage that represents + confidence in the intent. If you are specifying an intent as part of a + request, but you do not have a calculated confidence value, specify `1`. + :param str skill: (optional) The skill that identified the intent. + Currently, the only possible values are `main skill` for the dialog skill + (if enabled) and `actions skill` for the action skill. + This property is present only if the assistant has both a dialog skill and + an action skill. + """ + self.intent = intent + self.confidence = confidence + self.skill = skill + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeIntent': + """Initialize a RuntimeIntent object from a json dictionary.""" + args = {} + if (intent := _dict.get('intent')) is not None: + args['intent'] = intent + else: + raise ValueError( + 'Required property \'intent\' not present in RuntimeIntent JSON' + ) + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + if (skill := _dict.get('skill')) is not None: + args['skill'] = skill + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeIntent object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'intent') and self.intent is not None: + _dict['intent'] = self.intent + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + if hasattr(self, 'skill') and self.skill is not None: + _dict['skill'] = self.skill + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeIntent object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RuntimeIntent') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RuntimeIntent') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class RuntimeResponseGeneric: + """ + RuntimeResponseGeneric. + + """ + + def __init__(self,) -> None: + """ + Initialize a RuntimeResponseGeneric object. + + """ + msg = "Cannot instantiate base class. 
Instead, instantiate one of the defined subclasses: {0}".format( + ", ".join([ + 'RuntimeResponseGenericRuntimeResponseTypeConversationalSearch', + 'RuntimeResponseGenericRuntimeResponseTypeText', + 'RuntimeResponseGenericRuntimeResponseTypePause', + 'RuntimeResponseGenericRuntimeResponseTypeImage', + 'RuntimeResponseGenericRuntimeResponseTypeOption', + 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent', + 'RuntimeResponseGenericRuntimeResponseTypeSuggestion', + 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer', + 'RuntimeResponseGenericRuntimeResponseTypeSearch', + 'RuntimeResponseGenericRuntimeResponseTypeUserDefined', + 'RuntimeResponseGenericRuntimeResponseTypeVideo', + 'RuntimeResponseGenericRuntimeResponseTypeAudio', + 'RuntimeResponseGenericRuntimeResponseTypeIframe', + 'RuntimeResponseGenericRuntimeResponseTypeDate', + 'RuntimeResponseGenericRuntimeResponseTypeDtmf', + 'RuntimeResponseGenericRuntimeResponseTypeEndSession' + ])) + raise Exception(msg) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RuntimeResponseGeneric': + """Initialize a RuntimeResponseGeneric object from a json dictionary.""" + disc_class = cls._get_class_by_discriminator(_dict) + if disc_class != cls: + return disc_class.from_dict(_dict) + msg = "Cannot convert dictionary into an instance of base class 'RuntimeResponseGeneric'. The discriminator value should map to a valid subclass: {0}".format( + ", ".join([ + 'RuntimeResponseGenericRuntimeResponseTypeConversationalSearch', + 'RuntimeResponseGenericRuntimeResponseTypeText', + 'RuntimeResponseGenericRuntimeResponseTypePause', + 'RuntimeResponseGenericRuntimeResponseTypeImage', + 'RuntimeResponseGenericRuntimeResponseTypeOption', + 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent', + 'RuntimeResponseGenericRuntimeResponseTypeSuggestion', + 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer', + 'RuntimeResponseGenericRuntimeResponseTypeSearch', + 'RuntimeResponseGenericRuntimeResponseTypeUserDefined', + 'RuntimeResponseGenericRuntimeResponseTypeVideo', + 'RuntimeResponseGenericRuntimeResponseTypeAudio', + 'RuntimeResponseGenericRuntimeResponseTypeIframe', + 'RuntimeResponseGenericRuntimeResponseTypeDate', + 'RuntimeResponseGenericRuntimeResponseTypeDtmf', + 'RuntimeResponseGenericRuntimeResponseTypeEndSession' + ])) + raise Exception(msg) + + @classmethod + def _from_dict(cls, _dict: Dict): + """Initialize a RuntimeResponseGeneric object from a json dictionary.""" + return cls.from_dict(_dict) + + @classmethod + def _get_class_by_discriminator(cls, _dict: Dict) -> object: + mapping = {} + mapping[ + 'conversation_search'] = 'RuntimeResponseGenericRuntimeResponseTypeConversationalSearch' + mapping['audio'] = 'RuntimeResponseGenericRuntimeResponseTypeAudio' + mapping[ + 'channel_transfer'] = 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer' + mapping[ + 'connect_to_agent'] = 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent' + mapping['date'] = 'RuntimeResponseGenericRuntimeResponseTypeDate' + mapping['iframe'] = 'RuntimeResponseGenericRuntimeResponseTypeIframe' + mapping['image'] = 'RuntimeResponseGenericRuntimeResponseTypeImage' + mapping['option'] = 'RuntimeResponseGenericRuntimeResponseTypeOption' + mapping[ + 'suggestion'] = 'RuntimeResponseGenericRuntimeResponseTypeSuggestion' + mapping['pause'] = 'RuntimeResponseGenericRuntimeResponseTypePause' + mapping['search'] = 'RuntimeResponseGenericRuntimeResponseTypeSearch' + mapping['text'] = 'RuntimeResponseGenericRuntimeResponseTypeText' + mapping[ +
'user_defined'] = 'RuntimeResponseGenericRuntimeResponseTypeUserDefined' + mapping['video'] = 'RuntimeResponseGenericRuntimeResponseTypeVideo' + mapping['dtmf'] = 'RuntimeResponseGenericRuntimeResponseTypeDtmf' + mapping[ + 'end_session'] = 'RuntimeResponseGenericRuntimeResponseTypeEndSession' + disc_value = _dict.get('response_type') + if disc_value is None: + raise ValueError( + 'Discriminator property \'response_type\' not found in RuntimeResponseGeneric JSON' + ) + class_name = mapping.get(disc_value, disc_value) + try: + disc_class = getattr(sys.modules[__name__], class_name) + except AttributeError: + disc_class = cls + if isinstance(disc_class, object): + return disc_class + raise TypeError('%s is not a discriminator class' % class_name) + + +class SearchResult: + """ + SearchResult. + + :param str id: The unique identifier of the document in the Discovery service + collection. + This property is included in responses from search skills, which are available + only to Plus or Enterprise plan users. + :param SearchResultMetadata result_metadata: An object containing search result + metadata from the Discovery service. + :param str body: (optional) A description of the search result. This is taken + from an abstract, summary, or highlight field in the Discovery service response, + as specified in the search skill configuration. + :param str title: (optional) The title of the search result. This is taken from + a title or name field in the Discovery service response, as specified in the + search skill configuration. + :param str url: (optional) The URL of the original data object in its native + data source. + :param SearchResultHighlight highlight: (optional) An object containing segments + of text from search results with query-matching text highlighted using HTML + `<em>` tags. + :param List[SearchResultAnswer] answers: (optional) An array specifying segments + of text within the result that were identified as direct answers to the search + query. Currently, only the single answer with the highest confidence (if any) is + returned. + **Notes:** + - Answer finding is available only if the search skill is connected to a + Discovery v2 service instance. + - Answer finding is not supported on IBM Cloud Pak for Data. + """ + + def __init__( + self, + id: str, + result_metadata: 'SearchResultMetadata', + *, + body: Optional[str] = None, + title: Optional[str] = None, + url: Optional[str] = None, + highlight: Optional['SearchResultHighlight'] = None, + answers: Optional[List['SearchResultAnswer']] = None, + ) -> None: + """ + Initialize a SearchResult object. + + :param str id: The unique identifier of the document in the Discovery + service collection. + This property is included in responses from search skills, which are + available only to Plus or Enterprise plan users. + :param SearchResultMetadata result_metadata: An object containing search + result metadata from the Discovery service. + :param str body: (optional) A description of the search result. This is + taken from an abstract, summary, or highlight field in the Discovery + service response, as specified in the search skill configuration. + :param str title: (optional) The title of the search result. This is taken + from a title or name field in the Discovery service response, as specified + in the search skill configuration. + :param str url: (optional) The URL of the original data object in its + native data source.
+ :param SearchResultHighlight highlight: (optional) An object containing + segments of text from search results with query-matching text highlighted + using HTML `<em>` tags. + :param List[SearchResultAnswer] answers: (optional) An array specifying + segments of text within the result that were identified as direct answers + to the search query. Currently, only the single answer with the highest + confidence (if any) is returned. + **Notes:** + - Answer finding is available only if the search skill is connected to a + Discovery v2 service instance. + - Answer finding is not supported on IBM Cloud Pak for Data. + """ + self.id = id + self.result_metadata = result_metadata + self.body = body + self.title = title + self.url = url + self.highlight = highlight + self.answers = answers + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchResult': + """Initialize a SearchResult object from a json dictionary.""" + args = {} + if (id := _dict.get('id')) is not None: + args['id'] = id + else: + raise ValueError( + 'Required property \'id\' not present in SearchResult JSON') + if (result_metadata := _dict.get('result_metadata')) is not None: + args['result_metadata'] = SearchResultMetadata.from_dict( + result_metadata) + else: + raise ValueError( + 'Required property \'result_metadata\' not present in SearchResult JSON' + ) + if (body := _dict.get('body')) is not None: + args['body'] = body + if (title := _dict.get('title')) is not None: + args['title'] = title + if (url := _dict.get('url')) is not None: + args['url'] = url + if (highlight := _dict.get('highlight')) is not None: + args['highlight'] = SearchResultHighlight.from_dict(highlight) + if (answers := _dict.get('answers')) is not None: + args['answers'] = [SearchResultAnswer.from_dict(v) for v in answers] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'id') and self.id is not None: + _dict['id'] = self.id + if hasattr(self, + 'result_metadata') and self.result_metadata is not None: + if isinstance(self.result_metadata, dict): + _dict['result_metadata'] = self.result_metadata + else: + _dict['result_metadata'] = self.result_metadata.to_dict() + if hasattr(self, 'body') and self.body is not None: + _dict['body'] = self.body + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + if hasattr(self, 'highlight') and self.highlight is not None: + if isinstance(self.highlight, dict): + _dict['highlight'] = self.highlight + else: + _dict['highlight'] = self.highlight.to_dict() + if hasattr(self, 'answers') and self.answers is not None: + answers_list = [] + for v in self.answers: + if isinstance(v, dict): + answers_list.append(v) + else: + answers_list.append(v.to_dict()) + _dict['answers'] = answers_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other:
'SearchResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchResultAnswer: + """ + An object specifying a segment of text that was identified as a direct answer to the + search query. + + :param str text: The text of the answer. + :param float confidence: The confidence score for the answer, as returned by the + Discovery service. + """ + + def __init__( + self, + text: str, + confidence: float, + ) -> None: + """ + Initialize a SearchResultAnswer object. + + :param str text: The text of the answer. + :param float confidence: The confidence score for the answer, as returned + by the Discovery service. + """ + self.text = text + self.confidence = confidence + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchResultAnswer': + """Initialize a SearchResultAnswer object from a json dictionary.""" + args = {} + if (text := _dict.get('text')) is not None: + args['text'] = text + else: + raise ValueError( + 'Required property \'text\' not present in SearchResultAnswer JSON' + ) + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + else: + raise ValueError( + 'Required property \'confidence\' not present in SearchResultAnswer JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchResultAnswer object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchResultAnswer object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchResultAnswer') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchResultAnswer') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchResultHighlight: + """ + An object containing segments of text from search results with query-matching text + highlighted using HTML `<em>` tags. + + :param List[str] body: (optional) An array of strings containing segments taken + from body text in the search results, with query-matching substrings + highlighted. + :param List[str] title: (optional) An array of strings containing segments taken + from title text in the search results, with query-matching substrings + highlighted. + :param List[str] url: (optional) An array of strings containing segments taken + from URLs in the search results, with query-matching substrings highlighted. + + This type supports additional properties of type List[str]. An array of strings + containing segments taken from a field in the search results that is not mapped to the + `body`, `title`, or `url` property, with query-matching substrings highlighted. The + property name is the name of the field in the Discovery collection.
+ """ + + # The set of defined properties for the class + _properties = frozenset(['body', 'title', 'url']) + + def __init__( + self, + *, + body: Optional[List[str]] = None, + title: Optional[List[str]] = None, + url: Optional[List[str]] = None, + **kwargs: Optional[List[str]], + ) -> None: + """ + Initialize a SearchResultHighlight object. + + :param List[str] body: (optional) An array of strings containing segments + taken from body text in the search results, with query-matching substrings + highlighted. + :param List[str] title: (optional) An array of strings containing segments + taken from title text in the search results, with query-matching substrings + highlighted. + :param List[str] url: (optional) An array of strings containing segments + taken from URLs in the search results, with query-matching substrings + highlighted. + :param List[str] **kwargs: (optional) An array of strings containing + segments taken from a field in the search results that is not mapped to the + `body`, `title`, or `url` property, with query-matching substrings + highlighted. The property name is the name of the field in the Discovery + collection. + """ + self.body = body + self.title = title + self.url = url + for k, v in kwargs.items(): + if k not in SearchResultHighlight._properties: + if not isinstance(v, List): + raise ValueError( + 'Value for additional property {} must be of type List[Foo]' + .format(k)) + _v = [] + for elem in v: + if not isinstance(elem, str): + raise ValueError( + 'Value for additional property {} must be of type List[str]' + .format(k)) + _v.append(elem) + setattr(self, k, _v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. + format(k)) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchResultHighlight': + """Initialize a SearchResultHighlight object from a json dictionary.""" + args = {} + if (body := _dict.get('body')) is not None: + args['body'] = body + if (title := _dict.get('title')) is not None: + args['title'] = title + if (url := _dict.get('url')) is not None: + args['url'] = url + for k, v in _dict.items(): + if k not in cls._properties: + if not isinstance(v, List): + raise ValueError( + 'Value for additional property {} must be of type List[str]' + .format(k)) + _v = [] + for elem in v: + if not isinstance(elem, str): + raise ValueError( + 'Value for additional property {} must be of type List[str]' + .format(k)) + _v.append(elem) + args[k] = _v + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchResultHighlight object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'body') and self.body is not None: + _dict['body'] = self.body + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + for k in [ + _k for _k in vars(self).keys() + if _k not in SearchResultHighlight._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def get_properties(self) -> Dict: + """Return the additional properties from this instance of SearchResultHighlight in the form of a dict.""" + _dict = {} + for k in [ + _k for _k in vars(self).keys() + if _k not in SearchResultHighlight._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def 
set_properties(self, _dict: dict): + """Set a dictionary of additional properties in this instance of SearchResultHighlight""" + for k in [ + _k for _k in vars(self).keys() + if _k not in SearchResultHighlight._properties + ]: + delattr(self, k) + for k, v in _dict.items(): + if k not in SearchResultHighlight._properties: + if not isinstance(v, List): + raise ValueError( + 'Value for additional property {} must be of type List[str]' + .format(k)) + _v = [] + for elem in v: + if not isinstance(elem, str): + raise ValueError( + 'Value for additional property {} must be of type List[str]' + .format(k)) + _v.append(elem) + setattr(self, k, _v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. + format(k)) + + def __str__(self) -> str: + """Return a `str` version of this SearchResultHighlight object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchResultHighlight') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchResultHighlight') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchResultMetadata: + """ + An object containing search result metadata from the Discovery service. + + :param float confidence: (optional) The confidence score for the given result, + as returned by the Discovery service. + :param float score: (optional) An unbounded measure of the relevance of a + particular result, dependent on the query and matching document. A higher score + indicates a greater match to the query parameters. + """ + + def __init__( + self, + *, + confidence: Optional[float] = None, + score: Optional[float] = None, + ) -> None: + """ + Initialize a SearchResultMetadata object. + + :param float confidence: (optional) The confidence score for the given + result, as returned by the Discovery service. + :param float score: (optional) An unbounded measure of the relevance of a + particular result, dependent on the query and matching document. A higher + score indicates a greater match to the query parameters. 
+ """ + self.confidence = confidence + self.score = score + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchResultMetadata': + """Initialize a SearchResultMetadata object from a json dictionary.""" + args = {} + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + if (score := _dict.get('score')) is not None: + args['score'] = score + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchResultMetadata object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + if hasattr(self, 'score') and self.score is not None: + _dict['score'] = self.score + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchResultMetadata object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchResultMetadata') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchResultMetadata') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchResults: + """ + SearchResults. + + :param SearchResultsResultMetadata result_metadata: The metadata of the search + result. + :param str id: The ID of the search result. It may not be unique. + :param str title: The title of the search result. + :param str body: The body content of the search result. + """ + + def __init__( + self, + result_metadata: 'SearchResultsResultMetadata', + id: str, + title: str, + body: str, + ) -> None: + """ + Initialize a SearchResults object. + + :param SearchResultsResultMetadata result_metadata: The metadata of the + search result. + :param str id: The ID of the search result. It may not be unique. + :param str title: The title of the search result. + :param str body: The body content of the search result. 
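+
+        A minimal construction sketch follows; the field values shown are
+        placeholders, not output from a real query::
+
+            # Hypothetical search result with its required metadata.
+            result = SearchResults(
+                result_metadata=SearchResultsResultMetadata(score=1),
+                id='doc-001',
+                title='Example title',
+                body='Example body text.',
+            )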
+ """ + self.result_metadata = result_metadata + self.id = id + self.title = title + self.body = body + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchResults': + """Initialize a SearchResults object from a json dictionary.""" + args = {} + if (result_metadata := _dict.get('result_metadata')) is not None: + args['result_metadata'] = SearchResultsResultMetadata.from_dict( + result_metadata) + else: + raise ValueError( + 'Required property \'result_metadata\' not present in SearchResults JSON' + ) + if (id := _dict.get('id')) is not None: + args['id'] = id + else: + raise ValueError( + 'Required property \'id\' not present in SearchResults JSON') + if (title := _dict.get('title')) is not None: + args['title'] = title + else: + raise ValueError( + 'Required property \'title\' not present in SearchResults JSON') + if (body := _dict.get('body')) is not None: + args['body'] = body + else: + raise ValueError( + 'Required property \'body\' not present in SearchResults JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'result_metadata') and self.result_metadata is not None: + if isinstance(self.result_metadata, dict): + _dict['result_metadata'] = self.result_metadata + else: + _dict['result_metadata'] = self.result_metadata.to_dict() + if hasattr(self, 'id') and self.id is not None: + _dict['id'] = self.id + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'body') and self.body is not None: + _dict['body'] = self.body + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchResults object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchResults') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchResults') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchResultsResultMetadata: + """ + The metadata of the search result. + + :param str document_retrieval_source: (optional) The source of the search + result. + :param int score: (optional) The relevance score of the search result to the + user query. + """ + + def __init__( + self, + *, + document_retrieval_source: Optional[str] = None, + score: Optional[int] = None, + ) -> None: + """ + Initialize a SearchResultsResultMetadata object. + + :param str document_retrieval_source: (optional) The source of the search + result. + :param int score: (optional) The relevance score of the search result to + the user query. 
+ """ + self.document_retrieval_source = document_retrieval_source + self.score = score + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchResultsResultMetadata': + """Initialize a SearchResultsResultMetadata object from a json dictionary.""" + args = {} + if (document_retrieval_source := + _dict.get('document_retrieval_source')) is not None: + args['document_retrieval_source'] = document_retrieval_source + if (score := _dict.get('score')) is not None: + args['score'] = score + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchResultsResultMetadata object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'document_retrieval_source' + ) and self.document_retrieval_source is not None: + _dict['document_retrieval_source'] = self.document_retrieval_source + if hasattr(self, 'score') and self.score is not None: + _dict['score'] = self.score + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchResultsResultMetadata object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchResultsResultMetadata') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchResultsResultMetadata') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchSettings: + """ + An object describing the search skill configuration. + **Note:** Search settings are not supported in **Import skills** requests, and are not + included in **Export skills** responses. + + :param SearchSettingsDiscovery discovery: (optional) Configuration settings for + the Watson Discovery service instance used by the search integration. + :param SearchSettingsMessages messages: The messages included with responses + from the search integration. + :param SearchSettingsSchemaMapping schema_mapping: The mapping between fields in + the Watson Discovery collection and properties in the search response. + :param SearchSettingsElasticSearch elastic_search: (optional) Configuration + settings for the Elasticsearch service used by the search integration. You can + provide either basic auth or apiKey auth. + :param SearchSettingsConversationalSearch conversational_search: Configuration + settings for conversational search. + :param SearchSettingsServerSideSearch server_side_search: (optional) + Configuration settings for the server-side search service used by the search + integration. You can provide either basic auth, apiKey auth or none. + :param SearchSettingsClientSideSearch client_side_search: (optional) + Configuration settings for the client-side search service or server-side search + service used by the search integration. 
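+
+    As a rough sketch (the nested values are placeholders), a settings object can
+    be built from its required parts or deserialized with ``from_dict``::
+
+        settings = SearchSettings(
+            messages=SearchSettingsMessages(
+                success='Here is what I found.',
+                error='Search is unavailable right now.',
+                no_result='No results were found.',
+            ),
+            schema_mapping=SearchSettingsSchemaMapping(
+                url='url', body='text', title='title'),
+            conversational_search=SearchSettingsConversationalSearch(
+                enabled=True),
+        )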
+ """ + + def __init__( + self, + messages: 'SearchSettingsMessages', + schema_mapping: 'SearchSettingsSchemaMapping', + conversational_search: 'SearchSettingsConversationalSearch', + *, + discovery: Optional['SearchSettingsDiscovery'] = None, + elastic_search: Optional['SearchSettingsElasticSearch'] = None, + server_side_search: Optional['SearchSettingsServerSideSearch'] = None, + client_side_search: Optional['SearchSettingsClientSideSearch'] = None, + ) -> None: + """ + Initialize a SearchSettings object. + + :param SearchSettingsMessages messages: The messages included with + responses from the search integration. + :param SearchSettingsSchemaMapping schema_mapping: The mapping between + fields in the Watson Discovery collection and properties in the search + response. + :param SearchSettingsConversationalSearch conversational_search: + Configuration settings for conversational search. + :param SearchSettingsDiscovery discovery: (optional) Configuration settings + for the Watson Discovery service instance used by the search integration. + :param SearchSettingsElasticSearch elastic_search: (optional) Configuration + settings for the Elasticsearch service used by the search integration. You + can provide either basic auth or apiKey auth. + :param SearchSettingsServerSideSearch server_side_search: (optional) + Configuration settings for the server-side search service used by the + search integration. You can provide either basic auth, apiKey auth or none. + :param SearchSettingsClientSideSearch client_side_search: (optional) + Configuration settings for the client-side search service or server-side + search service used by the search integration. + """ + self.discovery = discovery + self.messages = messages + self.schema_mapping = schema_mapping + self.elastic_search = elastic_search + self.conversational_search = conversational_search + self.server_side_search = server_side_search + self.client_side_search = client_side_search + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchSettings': + """Initialize a SearchSettings object from a json dictionary.""" + args = {} + if (discovery := _dict.get('discovery')) is not None: + args['discovery'] = SearchSettingsDiscovery.from_dict(discovery) + if (messages := _dict.get('messages')) is not None: + args['messages'] = SearchSettingsMessages.from_dict(messages) + else: + raise ValueError( + 'Required property \'messages\' not present in SearchSettings JSON' + ) + if (schema_mapping := _dict.get('schema_mapping')) is not None: + args['schema_mapping'] = SearchSettingsSchemaMapping.from_dict( + schema_mapping) + else: + raise ValueError( + 'Required property \'schema_mapping\' not present in SearchSettings JSON' + ) + if (elastic_search := _dict.get('elastic_search')) is not None: + args['elastic_search'] = SearchSettingsElasticSearch.from_dict( + elastic_search) + if (conversational_search := + _dict.get('conversational_search')) is not None: + args[ + 'conversational_search'] = SearchSettingsConversationalSearch.from_dict( + conversational_search) + else: + raise ValueError( + 'Required property \'conversational_search\' not present in SearchSettings JSON' + ) + if (server_side_search := _dict.get('server_side_search')) is not None: + args[ + 'server_side_search'] = SearchSettingsServerSideSearch.from_dict( + server_side_search) + if (client_side_search := _dict.get('client_side_search')) is not None: + args[ + 'client_side_search'] = SearchSettingsClientSideSearch.from_dict( + client_side_search) + return cls(**args) + + @classmethod + 
def _from_dict(cls, _dict): + """Initialize a SearchSettings object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'discovery') and self.discovery is not None: + if isinstance(self.discovery, dict): + _dict['discovery'] = self.discovery + else: + _dict['discovery'] = self.discovery.to_dict() + if hasattr(self, 'messages') and self.messages is not None: + if isinstance(self.messages, dict): + _dict['messages'] = self.messages + else: + _dict['messages'] = self.messages.to_dict() + if hasattr(self, 'schema_mapping') and self.schema_mapping is not None: + if isinstance(self.schema_mapping, dict): + _dict['schema_mapping'] = self.schema_mapping + else: + _dict['schema_mapping'] = self.schema_mapping.to_dict() + if hasattr(self, 'elastic_search') and self.elastic_search is not None: + if isinstance(self.elastic_search, dict): + _dict['elastic_search'] = self.elastic_search + else: + _dict['elastic_search'] = self.elastic_search.to_dict() + if hasattr(self, 'conversational_search' + ) and self.conversational_search is not None: + if isinstance(self.conversational_search, dict): + _dict['conversational_search'] = self.conversational_search + else: + _dict[ + 'conversational_search'] = self.conversational_search.to_dict( + ) + if hasattr( + self, + 'server_side_search') and self.server_side_search is not None: + if isinstance(self.server_side_search, dict): + _dict['server_side_search'] = self.server_side_search + else: + _dict['server_side_search'] = self.server_side_search.to_dict() + if hasattr( + self, + 'client_side_search') and self.client_side_search is not None: + if isinstance(self.client_side_search, dict): + _dict['client_side_search'] = self.client_side_search + else: + _dict['client_side_search'] = self.client_side_search.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchSettings object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchSettings') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchSettings') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchSettingsClientSideSearch: + """ + Configuration settings for the client-side search service or server-side search + service used by the search integration. + + :param str filter: (optional) The filter string that is applied to the search + results. + :param dict metadata: (optional) The metadata object. + """ + + def __init__( + self, + *, + filter: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> None: + """ + Initialize a SearchSettingsClientSideSearch object. + + :param str filter: (optional) The filter string that is applied to the + search results. + :param dict metadata: (optional) The metadata object. 
+ """ + self.filter = filter + self.metadata = metadata + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchSettingsClientSideSearch': + """Initialize a SearchSettingsClientSideSearch object from a json dictionary.""" + args = {} + if (filter := _dict.get('filter')) is not None: + args['filter'] = filter + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = metadata + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchSettingsClientSideSearch object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'filter') and self.filter is not None: + _dict['filter'] = self.filter + if hasattr(self, 'metadata') and self.metadata is not None: + _dict['metadata'] = self.metadata + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchSettingsClientSideSearch object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchSettingsClientSideSearch') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchSettingsClientSideSearch') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchSettingsConversationalSearch: + """ + Configuration settings for conversational search. + + :param bool enabled: Whether to enable conversational search. + :param SearchSettingsConversationalSearchResponseLength response_length: + (optional) + :param SearchSettingsConversationalSearchSearchConfidence search_confidence: + (optional) + """ + + def __init__( + self, + enabled: bool, + *, + response_length: Optional[ + 'SearchSettingsConversationalSearchResponseLength'] = None, + search_confidence: Optional[ + 'SearchSettingsConversationalSearchSearchConfidence'] = None, + ) -> None: + """ + Initialize a SearchSettingsConversationalSearch object. + + :param bool enabled: Whether to enable conversational search. 
+ :param SearchSettingsConversationalSearchResponseLength response_length: + (optional) + :param SearchSettingsConversationalSearchSearchConfidence + search_confidence: (optional) + """ + self.enabled = enabled + self.response_length = response_length + self.search_confidence = search_confidence + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchSettingsConversationalSearch': + """Initialize a SearchSettingsConversationalSearch object from a json dictionary.""" + args = {} + if (enabled := _dict.get('enabled')) is not None: + args['enabled'] = enabled + else: + raise ValueError( + 'Required property \'enabled\' not present in SearchSettingsConversationalSearch JSON' + ) + if (response_length := _dict.get('response_length')) is not None: + args[ + 'response_length'] = SearchSettingsConversationalSearchResponseLength.from_dict( + response_length) + if (search_confidence := _dict.get('search_confidence')) is not None: + args[ + 'search_confidence'] = SearchSettingsConversationalSearchSearchConfidence.from_dict( + search_confidence) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchSettingsConversationalSearch object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enabled') and self.enabled is not None: + _dict['enabled'] = self.enabled + if hasattr(self, + 'response_length') and self.response_length is not None: + if isinstance(self.response_length, dict): + _dict['response_length'] = self.response_length + else: + _dict['response_length'] = self.response_length.to_dict() + if hasattr(self, + 'search_confidence') and self.search_confidence is not None: + if isinstance(self.search_confidence, dict): + _dict['search_confidence'] = self.search_confidence + else: + _dict['search_confidence'] = self.search_confidence.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchSettingsConversationalSearch object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchSettingsConversationalSearch') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchSettingsConversationalSearch') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchSettingsConversationalSearchResponseLength: + """ + SearchSettingsConversationalSearchResponseLength. + + :param str option: (optional) The response length option. It controls the length + of the generated response. + """ + + def __init__( + self, + *, + option: Optional[str] = None, + ) -> None: + """ + Initialize a SearchSettingsConversationalSearchResponseLength object. + + :param str option: (optional) The response length option. It controls the + length of the generated response. 
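+
+        The accepted values are listed in ``OptionEnum`` below; for example
+        (illustrative only)::
+
+            response_length = SearchSettingsConversationalSearchResponseLength(
+                option='concise')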
+ """ + self.option = option + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'SearchSettingsConversationalSearchResponseLength': + """Initialize a SearchSettingsConversationalSearchResponseLength object from a json dictionary.""" + args = {} + if (option := _dict.get('option')) is not None: + args['option'] = option + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchSettingsConversationalSearchResponseLength object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'option') and self.option is not None: + _dict['option'] = self.option + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchSettingsConversationalSearchResponseLength object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'SearchSettingsConversationalSearchResponseLength') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, + other: 'SearchSettingsConversationalSearchResponseLength') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class OptionEnum(str, Enum): + """ + The response length option. It controls the length of the generated response. + """ + + CONCISE = 'concise' + MODERATE = 'moderate' + VERBOSE = 'verbose' + + +class SearchSettingsConversationalSearchSearchConfidence: + """ + SearchSettingsConversationalSearchSearchConfidence. + + :param str threshold: (optional) The search confidence threshold. + It controls the tendency for conversational search to produce “I don't know” + answers. + """ + + def __init__( + self, + *, + threshold: Optional[str] = None, + ) -> None: + """ + Initialize a SearchSettingsConversationalSearchSearchConfidence object. + + :param str threshold: (optional) The search confidence threshold. + It controls the tendency for conversational search to produce “I don't + know” answers. 
+ """ + self.threshold = threshold + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'SearchSettingsConversationalSearchSearchConfidence': + """Initialize a SearchSettingsConversationalSearchSearchConfidence object from a json dictionary.""" + args = {} + if (threshold := _dict.get('threshold')) is not None: + args['threshold'] = threshold + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchSettingsConversationalSearchSearchConfidence object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'threshold') and self.threshold is not None: + _dict['threshold'] = self.threshold + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchSettingsConversationalSearchSearchConfidence object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'SearchSettingsConversationalSearchSearchConfidence' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: 'SearchSettingsConversationalSearchSearchConfidence' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ThresholdEnum(str, Enum): + """ + The search confidence threshold. + It controls the tendency for conversational search to produce “I don't know” + answers. + """ + + RARELY = 'rarely' + LESS_OFTEN = 'less_often' + MORE_OFTEN = 'more_often' + MOST_OFTEN = 'most_often' + + +class SearchSettingsDiscovery: + """ + Configuration settings for the Watson Discovery service instance used by the search + integration. + + :param str instance_id: The ID for the Watson Discovery service instance. + :param str project_id: The ID for the Watson Discovery project. + :param str url: The URL for the Watson Discovery service instance. + :param int max_primary_results: (optional) The maximum number of primary results + to include in the response. + :param int max_total_results: (optional) The maximum total number of primary and + additional results to include in the response. + :param float confidence_threshold: (optional) The minimum confidence threshold + for included results. Any results with a confidence below this threshold will be + discarded. + :param bool highlight: (optional) Whether to include the most relevant passages + of text in the **highlight** property of each result. + :param bool find_answers: (optional) Whether to use the answer finding feature + to emphasize answers within highlighted passages. This property is ignored if + **highlight**=`false`. + **Notes:** + - Answer finding is available only if the search skill is connected to a + Discovery v2 service instance. + - Answer finding is not supported on IBM Cloud Pak for Data. + :param SearchSettingsDiscoveryAuthentication authentication: Authentication + information for the Watson Discovery service. For more information, see the + [Watson Discovery + documentation](https://cloud.ibm.com/apidocs/discovery-data#authentication). + **Note:** You must specify either **basic** or **bearer**, but not both. 
+ """ + + def __init__( + self, + instance_id: str, + project_id: str, + url: str, + authentication: 'SearchSettingsDiscoveryAuthentication', + *, + max_primary_results: Optional[int] = None, + max_total_results: Optional[int] = None, + confidence_threshold: Optional[float] = None, + highlight: Optional[bool] = None, + find_answers: Optional[bool] = None, + ) -> None: + """ + Initialize a SearchSettingsDiscovery object. + + :param str instance_id: The ID for the Watson Discovery service instance. + :param str project_id: The ID for the Watson Discovery project. + :param str url: The URL for the Watson Discovery service instance. + :param SearchSettingsDiscoveryAuthentication authentication: Authentication + information for the Watson Discovery service. For more information, see the + [Watson Discovery + documentation](https://cloud.ibm.com/apidocs/discovery-data#authentication). + **Note:** You must specify either **basic** or **bearer**, but not both. + :param int max_primary_results: (optional) The maximum number of primary + results to include in the response. + :param int max_total_results: (optional) The maximum total number of + primary and additional results to include in the response. + :param float confidence_threshold: (optional) The minimum confidence + threshold for included results. Any results with a confidence below this + threshold will be discarded. + :param bool highlight: (optional) Whether to include the most relevant + passages of text in the **highlight** property of each result. + :param bool find_answers: (optional) Whether to use the answer finding + feature to emphasize answers within highlighted passages. This property is + ignored if **highlight**=`false`. + **Notes:** + - Answer finding is available only if the search skill is connected to a + Discovery v2 service instance. + - Answer finding is not supported on IBM Cloud Pak for Data. 
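+
+        A construction sketch follows; the IDs, URL, and token are placeholders
+        that you would replace with values from your own Discovery instance::
+
+            discovery = SearchSettingsDiscovery(
+                instance_id='my-discovery-instance-id',
+                project_id='my-discovery-project-id',
+                url='https://api.us-south.discovery.watson.cloud.ibm.com',
+                authentication=SearchSettingsDiscoveryAuthentication(
+                    bearer='my-bearer-token'),
+                max_primary_results=3,
+                highlight=True,
+            )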
+ """ + self.instance_id = instance_id + self.project_id = project_id + self.url = url + self.max_primary_results = max_primary_results + self.max_total_results = max_total_results + self.confidence_threshold = confidence_threshold + self.highlight = highlight + self.find_answers = find_answers + self.authentication = authentication + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchSettingsDiscovery': + """Initialize a SearchSettingsDiscovery object from a json dictionary.""" + args = {} + if (instance_id := _dict.get('instance_id')) is not None: + args['instance_id'] = instance_id + else: + raise ValueError( + 'Required property \'instance_id\' not present in SearchSettingsDiscovery JSON' + ) + if (project_id := _dict.get('project_id')) is not None: + args['project_id'] = project_id + else: + raise ValueError( + 'Required property \'project_id\' not present in SearchSettingsDiscovery JSON' + ) + if (url := _dict.get('url')) is not None: + args['url'] = url + else: + raise ValueError( + 'Required property \'url\' not present in SearchSettingsDiscovery JSON' + ) + if (max_primary_results := + _dict.get('max_primary_results')) is not None: + args['max_primary_results'] = max_primary_results + if (max_total_results := _dict.get('max_total_results')) is not None: + args['max_total_results'] = max_total_results + if (confidence_threshold := + _dict.get('confidence_threshold')) is not None: + args['confidence_threshold'] = confidence_threshold + if (highlight := _dict.get('highlight')) is not None: + args['highlight'] = highlight + if (find_answers := _dict.get('find_answers')) is not None: + args['find_answers'] = find_answers + if (authentication := _dict.get('authentication')) is not None: + args[ + 'authentication'] = SearchSettingsDiscoveryAuthentication.from_dict( + authentication) + else: + raise ValueError( + 'Required property \'authentication\' not present in SearchSettingsDiscovery JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchSettingsDiscovery object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'instance_id') and self.instance_id is not None: + _dict['instance_id'] = self.instance_id + if hasattr(self, 'project_id') and self.project_id is not None: + _dict['project_id'] = self.project_id + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + if hasattr( + self, + 'max_primary_results') and self.max_primary_results is not None: + _dict['max_primary_results'] = self.max_primary_results + if hasattr(self, + 'max_total_results') and self.max_total_results is not None: + _dict['max_total_results'] = self.max_total_results + if hasattr(self, 'confidence_threshold' + ) and self.confidence_threshold is not None: + _dict['confidence_threshold'] = self.confidence_threshold + if hasattr(self, 'highlight') and self.highlight is not None: + _dict['highlight'] = self.highlight + if hasattr(self, 'find_answers') and self.find_answers is not None: + _dict['find_answers'] = self.find_answers + if hasattr(self, 'authentication') and self.authentication is not None: + if isinstance(self.authentication, dict): + _dict['authentication'] = self.authentication + else: + _dict['authentication'] = self.authentication.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return 
a `str` version of this SearchSettingsDiscovery object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchSettingsDiscovery') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchSettingsDiscovery') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchSettingsDiscoveryAuthentication: + """ + Authentication information for the Watson Discovery service. For more information, see + the [Watson Discovery + documentation](https://cloud.ibm.com/apidocs/discovery-data#authentication). + **Note:** You must specify either **basic** or **bearer**, but not both. + + :param str basic: (optional) The HTTP basic authentication credentials for + Watson Discovery. Specify your Watson Discovery API key in the format + `apikey:{apikey}`. + :param str bearer: (optional) The authentication bearer token for Watson + Discovery. + """ + + def __init__( + self, + *, + basic: Optional[str] = None, + bearer: Optional[str] = None, + ) -> None: + """ + Initialize a SearchSettingsDiscoveryAuthentication object. + + :param str basic: (optional) The HTTP basic authentication credentials for + Watson Discovery. Specify your Watson Discovery API key in the format + `apikey:{apikey}`. + :param str bearer: (optional) The authentication bearer token for Watson + Discovery. + """ + self.basic = basic + self.bearer = bearer + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchSettingsDiscoveryAuthentication': + """Initialize a SearchSettingsDiscoveryAuthentication object from a json dictionary.""" + args = {} + if (basic := _dict.get('basic')) is not None: + args['basic'] = basic + if (bearer := _dict.get('bearer')) is not None: + args['bearer'] = bearer + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchSettingsDiscoveryAuthentication object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'basic') and self.basic is not None: + _dict['basic'] = self.basic + if hasattr(self, 'bearer') and self.bearer is not None: + _dict['bearer'] = self.bearer + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchSettingsDiscoveryAuthentication object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchSettingsDiscoveryAuthentication') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchSettingsDiscoveryAuthentication') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchSettingsElasticSearch: + """ + Configuration settings for the Elasticsearch service used by the search integration. + You can provide either basic auth or apiKey auth. + + :param str url: The URL for the Elasticsearch service. + :param str port: The port number for the Elasticsearch service URL. + **Note:** It can be omitted if a port number is appended to the URL. 
+    :param str username: (optional) The username of the basic authentication method.
+    :param str password: (optional) The password of the basic authentication method.
+          The credentials are not returned due to security reasons.
+    :param str index: The Elasticsearch index to use for the search integration.
+    :param List[object] filter: (optional) An array of filters that can be applied
+          to the search results via the `$FILTER` variable in the `query_body`. For more
+          information, see [Elasticsearch filter
+          documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/filter-search-results.html).
+    :param dict query_body: (optional) The Elasticsearch query object. For more
+          information, see [Elasticsearch search API
+          documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html).
+    :param str managed_index: (optional) The Elasticsearch index for uploading
+          documents. It is created automatically when the upload document option is
+          selected from the user interface.
+    :param str apikey: (optional) The API key of the apiKey authentication method.
+          Use either basic auth or apiKey auth. The credentials are not returned due to
+          security reasons.
+    """
+
+    def __init__(
+        self,
+        url: str,
+        port: str,
+        index: str,
+        *,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+        filter: Optional[List[object]] = None,
+        query_body: Optional[dict] = None,
+        managed_index: Optional[str] = None,
+        apikey: Optional[str] = None,
+    ) -> None:
+        """
+        Initialize a SearchSettingsElasticSearch object.
+
+        :param str url: The URL for the Elasticsearch service.
+        :param str port: The port number for the Elasticsearch service URL.
+               **Note:** It can be omitted if a port number is appended to the URL.
+        :param str index: The Elasticsearch index to use for the search
+               integration.
+        :param str username: (optional) The username of the basic authentication
+               method.
+        :param str password: (optional) The password of the basic authentication
+               method. The credentials are not returned due to security reasons.
+        :param List[object] filter: (optional) An array of filters that can be
+               applied to the search results via the `$FILTER` variable in the
+               `query_body`. For more information, see [Elasticsearch filter
+               documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/filter-search-results.html).
+        :param dict query_body: (optional) The Elasticsearch query object. For more
+               information, see [Elasticsearch search API
+               documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html).
+        :param str managed_index: (optional) The Elasticsearch index for uploading
+               documents. It is created automatically when the upload document option is
+               selected from the user interface.
+        :param str apikey: (optional) The API key of the apiKey authentication
+               method. Use either basic auth or apiKey auth. The credentials are not
+               returned due to security reasons.
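+
+        A minimal sketch with placeholder connection details (apiKey auth shown;
+        basic auth would pass ``username`` and ``password`` instead)::
+
+            elastic = SearchSettingsElasticSearch(
+                url='https://my-elasticsearch-host.example.com',
+                port='9243',
+                index='my-index',
+                apikey='my-elasticsearch-api-key',
+            )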
+ """ + self.url = url + self.port = port + self.username = username + self.password = password + self.index = index + self.filter = filter + self.query_body = query_body + self.managed_index = managed_index + self.apikey = apikey + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchSettingsElasticSearch': + """Initialize a SearchSettingsElasticSearch object from a json dictionary.""" + args = {} + if (url := _dict.get('url')) is not None: + args['url'] = url + else: + raise ValueError( + 'Required property \'url\' not present in SearchSettingsElasticSearch JSON' + ) + if (port := _dict.get('port')) is not None: + args['port'] = port + else: + raise ValueError( + 'Required property \'port\' not present in SearchSettingsElasticSearch JSON' + ) + if (username := _dict.get('username')) is not None: + args['username'] = username + if (password := _dict.get('password')) is not None: + args['password'] = password + if (index := _dict.get('index')) is not None: + args['index'] = index + else: + raise ValueError( + 'Required property \'index\' not present in SearchSettingsElasticSearch JSON' + ) + if (filter := _dict.get('filter')) is not None: + args['filter'] = filter + if (query_body := _dict.get('query_body')) is not None: + args['query_body'] = query_body + if (managed_index := _dict.get('managed_index')) is not None: + args['managed_index'] = managed_index + if (apikey := _dict.get('apikey')) is not None: + args['apikey'] = apikey + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchSettingsElasticSearch object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + if hasattr(self, 'port') and self.port is not None: + _dict['port'] = self.port + if hasattr(self, 'username') and self.username is not None: + _dict['username'] = self.username + if hasattr(self, 'password') and self.password is not None: + _dict['password'] = self.password + if hasattr(self, 'index') and self.index is not None: + _dict['index'] = self.index + if hasattr(self, 'filter') and self.filter is not None: + _dict['filter'] = self.filter + if hasattr(self, 'query_body') and self.query_body is not None: + _dict['query_body'] = self.query_body + if hasattr(self, 'managed_index') and self.managed_index is not None: + _dict['managed_index'] = self.managed_index + if hasattr(self, 'apikey') and self.apikey is not None: + _dict['apikey'] = self.apikey + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchSettingsElasticSearch object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchSettingsElasticSearch') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchSettingsElasticSearch') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchSettingsMessages: + """ + The messages included with responses from the search integration. + + :param str success: The message to include in the response to a successful + query. 
+ :param str error: The message to include in the response when the query + encounters an error. + :param str no_result: The message to include in the response when there is no + result from the query. + """ + + def __init__( + self, + success: str, + error: str, + no_result: str, + ) -> None: + """ + Initialize a SearchSettingsMessages object. + + :param str success: The message to include in the response to a successful + query. + :param str error: The message to include in the response when the query + encounters an error. + :param str no_result: The message to include in the response when there is + no result from the query. + """ + self.success = success + self.error = error + self.no_result = no_result + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchSettingsMessages': + """Initialize a SearchSettingsMessages object from a json dictionary.""" + args = {} + if (success := _dict.get('success')) is not None: + args['success'] = success + else: + raise ValueError( + 'Required property \'success\' not present in SearchSettingsMessages JSON' + ) + if (error := _dict.get('error')) is not None: + args['error'] = error + else: + raise ValueError( + 'Required property \'error\' not present in SearchSettingsMessages JSON' + ) + if (no_result := _dict.get('no_result')) is not None: + args['no_result'] = no_result + else: + raise ValueError( + 'Required property \'no_result\' not present in SearchSettingsMessages JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchSettingsMessages object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'success') and self.success is not None: + _dict['success'] = self.success + if hasattr(self, 'error') and self.error is not None: + _dict['error'] = self.error + if hasattr(self, 'no_result') and self.no_result is not None: + _dict['no_result'] = self.no_result + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchSettingsMessages object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchSettingsMessages') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchSettingsMessages') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SearchSettingsSchemaMapping: + """ + The mapping between fields in the Watson Discovery collection and properties in the + search response. + + :param str url: The field in the collection to map to the **url** property of + the response. + :param str body: The field in the collection to map to the **body** property in + the response. + :param str title: The field in the collection to map to the **title** property + for the schema. + """ + + def __init__( + self, + url: str, + body: str, + title: str, + ) -> None: + """ + Initialize a SearchSettingsSchemaMapping object. + + :param str url: The field in the collection to map to the **url** property + of the response. + :param str body: The field in the collection to map to the **body** + property in the response. + :param str title: The field in the collection to map to the **title** + property for the schema. 
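+
+        For example (the field names are placeholders for fields in your own
+        Discovery collection)::
+
+            mapping = SearchSettingsSchemaMapping(
+                url='metadata.source.url',
+                body='text',
+                title='extracted_metadata.title',
+            )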
+        """
+        self.url = url
+        self.body = body
+        self.title = title
+
+    @classmethod
+    def from_dict(cls, _dict: Dict) -> 'SearchSettingsSchemaMapping':
+        """Initialize a SearchSettingsSchemaMapping object from a json dictionary."""
+        args = {}
+        if (url := _dict.get('url')) is not None:
+            args['url'] = url
+        else:
+            raise ValueError(
+                'Required property \'url\' not present in SearchSettingsSchemaMapping JSON'
+            )
+        if (body := _dict.get('body')) is not None:
+            args['body'] = body
+        else:
+            raise ValueError(
+                'Required property \'body\' not present in SearchSettingsSchemaMapping JSON'
+            )
+        if (title := _dict.get('title')) is not None:
+            args['title'] = title
+        else:
+            raise ValueError(
+                'Required property \'title\' not present in SearchSettingsSchemaMapping JSON'
+            )
+        return cls(**args)
+
+    @classmethod
+    def _from_dict(cls, _dict):
+        """Initialize a SearchSettingsSchemaMapping object from a json dictionary."""
+        return cls.from_dict(_dict)
+
+    def to_dict(self) -> Dict:
+        """Return a json dictionary representing this model."""
+        _dict = {}
+        if hasattr(self, 'url') and self.url is not None:
+            _dict['url'] = self.url
+        if hasattr(self, 'body') and self.body is not None:
+            _dict['body'] = self.body
+        if hasattr(self, 'title') and self.title is not None:
+            _dict['title'] = self.title
+        return _dict
+
+    def _to_dict(self):
+        """Return a json dictionary representing this model."""
+        return self.to_dict()
+
+    def __str__(self) -> str:
+        """Return a `str` version of this SearchSettingsSchemaMapping object."""
+        return json.dumps(self.to_dict(), indent=2)
+
+    def __eq__(self, other: 'SearchSettingsSchemaMapping') -> bool:
+        """Return `true` when self and other are equal, false otherwise."""
+        if not isinstance(other, self.__class__):
+            return False
+        return self.__dict__ == other.__dict__
+
+    def __ne__(self, other: 'SearchSettingsSchemaMapping') -> bool:
+        """Return `true` when self and other are not equal, false otherwise."""
+        return not self == other
+
+
+class SearchSettingsServerSideSearch:
+    """
+    Configuration settings for the server-side search service used by the search
+    integration. You can provide either basic auth, apiKey auth or none.
+
+    :param str url: The URL of the server-side search service.
+    :param str port: (optional) The port number of the server-side search service.
+    :param str username: (optional) The username of the basic authentication method.
+    :param str password: (optional) The password of the basic authentication method.
+          The credentials are not returned due to security reasons.
+    :param str filter: (optional) The filter string that is applied to the search
+          results.
+    :param dict metadata: (optional) The metadata object.
+    :param str apikey: (optional) The API key of the apiKey authentication method.
+          The credentials are not returned due to security reasons.
+    :param bool no_auth: (optional) To clear previous auth, specify `no_auth =
+          true`.
+    :param str auth_type: (optional) The authorization type that is used.
+    """
+
+    def __init__(
+        self,
+        url: str,
+        *,
+        port: Optional[str] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+        filter: Optional[str] = None,
+        metadata: Optional[dict] = None,
+        apikey: Optional[str] = None,
+        no_auth: Optional[bool] = None,
+        auth_type: Optional[str] = None,
+    ) -> None:
+        """
+        Initialize a SearchSettingsServerSideSearch object.
+
+        :param str url: The URL of the server-side search service.
+        :param str port: (optional) The port number of the server-side search
+               service.
+        :param str username: (optional) The username of the basic authentication
+               method.
+        :param str password: (optional) The password of the basic authentication
+               method. The credentials are not returned due to security reasons.
+        :param str filter: (optional) The filter string that is applied to the
+               search results.
+        :param dict metadata: (optional) The metadata object.
+        :param str apikey: (optional) The API key of the apiKey authentication
+               method. The credentials are not returned due to security reasons.
+        :param bool no_auth: (optional) To clear previous auth, specify `no_auth =
+               true`.
+        :param str auth_type: (optional) The authorization type that is used.
+        """
+        self.url = url
+        self.port = port
+        self.username = username
+        self.password = password
+        self.filter = filter
+        self.metadata = metadata
+        self.apikey = apikey
+        self.no_auth = no_auth
+        self.auth_type = auth_type
+
+    @classmethod
+    def from_dict(cls, _dict: Dict) -> 'SearchSettingsServerSideSearch':
+        """Initialize a SearchSettingsServerSideSearch object from a json dictionary."""
+        args = {}
+        if (url := _dict.get('url')) is not None:
+            args['url'] = url
+        else:
+            raise ValueError(
+                'Required property \'url\' not present in SearchSettingsServerSideSearch JSON'
+            )
+        if (port := _dict.get('port')) is not None:
+            args['port'] = port
+        if (username := _dict.get('username')) is not None:
+            args['username'] = username
+        if (password := _dict.get('password')) is not None:
+            args['password'] = password
+        if (filter := _dict.get('filter')) is not None:
+            args['filter'] = filter
+        if (metadata := _dict.get('metadata')) is not None:
+            args['metadata'] = metadata
+        if (apikey := _dict.get('apikey')) is not None:
+            args['apikey'] = apikey
+        if (no_auth := _dict.get('no_auth')) is not None:
+            args['no_auth'] = no_auth
+        if (auth_type := _dict.get('auth_type')) is not None:
+            args['auth_type'] = auth_type
+        return cls(**args)
+
+    @classmethod
+    def _from_dict(cls, _dict):
+        """Initialize a SearchSettingsServerSideSearch object from a json dictionary."""
+        return cls.from_dict(_dict)
+
+    def to_dict(self) -> Dict:
+        """Return a json dictionary representing this model."""
+        _dict = {}
+        if hasattr(self, 'url') and self.url is not None:
+            _dict['url'] = self.url
+        if hasattr(self, 'port') and self.port is not None:
+            _dict['port'] = self.port
+        if hasattr(self, 'username') and self.username is not None:
+            _dict['username'] = self.username
+        if hasattr(self, 'password') and self.password is not None:
+            _dict['password'] = self.password
+        if hasattr(self, 'filter') and self.filter is not None:
+            _dict['filter'] = self.filter
+        if hasattr(self, 'metadata') and self.metadata is not None:
+            _dict['metadata'] = self.metadata
+        if hasattr(self, 'apikey') and self.apikey is not None:
+            _dict['apikey'] = self.apikey
+        if hasattr(self, 'no_auth') and self.no_auth is not None:
+            _dict['no_auth'] = self.no_auth
+        if hasattr(self, 'auth_type') and self.auth_type is not None:
+            _dict['auth_type'] = self.auth_type
+        return _dict
+
+    def _to_dict(self):
+        """Return a json dictionary representing this model."""
+        return self.to_dict()
+
+    def __str__(self) -> str:
+        """Return a `str` version of this SearchSettingsServerSideSearch object."""
+        return json.dumps(self.to_dict(), indent=2)
+
+    def __eq__(self, other: 'SearchSettingsServerSideSearch') -> bool:
+        """Return `true` when self and other are equal, false otherwise."""
+        if not isinstance(other, self.__class__):
+            return False
+        return self.__dict__ == other.__dict__
+
+    def __ne__(self, other: 
'SearchSettingsServerSideSearch') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class AuthTypeEnum(str, Enum): + """ + The authorization type that is used. + """ + + BASIC = 'basic' + APIKEY = 'apikey' + NONE = 'none' + + +class SearchSkillWarning: + """ + A warning describing an error in the search skill configuration. + + :param str code: (optional) The error code. + :param str path: (optional) The location of the error in the search skill + configuration object. + :param str message: (optional) The error message. + """ + + def __init__( + self, + *, + code: Optional[str] = None, + path: Optional[str] = None, + message: Optional[str] = None, + ) -> None: + """ + Initialize a SearchSkillWarning object. + + :param str code: (optional) The error code. + :param str path: (optional) The location of the error in the search skill + configuration object. + :param str message: (optional) The error message. + """ + self.code = code + self.path = path + self.message = message + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchSkillWarning': + """Initialize a SearchSkillWarning object from a json dictionary.""" + args = {} + if (code := _dict.get('code')) is not None: + args['code'] = code + if (path := _dict.get('path')) is not None: + args['path'] = path + if (message := _dict.get('message')) is not None: + args['message'] = message + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchSkillWarning object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'code') and self.code is not None: + _dict['code'] = self.code + if hasattr(self, 'path') and self.path is not None: + _dict['path'] = self.path + if hasattr(self, 'message') and self.message is not None: + _dict['message'] = self.message + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchSkillWarning object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchSkillWarning') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchSkillWarning') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SessionResponse: + """ + SessionResponse. + + :param str session_id: The session ID. + """ + + def __init__( + self, + session_id: str, + ) -> None: + """ + Initialize a SessionResponse object. + + :param str session_id: The session ID. 
+ """ + self.session_id = session_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SessionResponse': + """Initialize a SessionResponse object from a json dictionary.""" + args = {} + if (session_id := _dict.get('session_id')) is not None: + args['session_id'] = session_id + else: + raise ValueError( + 'Required property \'session_id\' not present in SessionResponse JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SessionResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'session_id') and self.session_id is not None: + _dict['session_id'] = self.session_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SessionResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SessionResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SessionResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Skill: + """ + Skill. + + :param str name: (optional) The name of the skill. This string cannot contain + carriage return, newline, or tab characters. + :param str description: (optional) The description of the skill. This string + cannot contain carriage return, newline, or tab characters. + :param dict workspace: (optional) An object containing the conversational + content of an action or dialog skill. + :param str skill_id: (optional) The skill ID of the skill. + :param str status: (optional) The current status of the skill: + - **Available**: The skill is available and ready to process messages. + - **Failed**: An asynchronous operation has failed. See the **status_errors** + property for more information about the cause of the failure. + - **Non Existent**: The skill does not exist. + - **Processing**: An asynchronous operation has not yet completed. + - **Training**: The skill is training based on new data. + :param List[StatusError] status_errors: (optional) An array of messages about + errors that caused an asynchronous operation to fail. Included only if + **status**=`Failed`. + :param str status_description: (optional) The description of the failed + asynchronous operation. Included only if **status**=`Failed`. + :param dict dialog_settings: (optional) For internal use only. + :param str assistant_id: (optional) The unique identifier of the assistant the + skill is associated with. + :param str workspace_id: (optional) The unique identifier of the workspace that + contains the skill content. Included only for action and dialog skills. + :param str environment_id: (optional) The unique identifier of the environment + where the skill is defined. For action and dialog skills, this is always the + draft environment. + :param bool valid: (optional) Whether the skill is structurally valid. + :param str next_snapshot_version: (optional) The name that will be given to the + next snapshot that is created for the skill. A snapshot of each versionable + skill is saved for each new release of an assistant. 
+ :param SearchSettings search_settings: (optional) An object describing the + search skill configuration. + **Note:** Search settings are not supported in **Import skills** requests, and + are not included in **Export skills** responses. + :param List[SearchSkillWarning] warnings: (optional) An array of warnings + describing errors with the search skill configuration. Included only for search + skills. + :param str language: The language of the skill. + :param str type: The type of skill. + """ + + def __init__( + self, + language: str, + type: str, + *, + name: Optional[str] = None, + description: Optional[str] = None, + workspace: Optional[dict] = None, + skill_id: Optional[str] = None, + status: Optional[str] = None, + status_errors: Optional[List['StatusError']] = None, + status_description: Optional[str] = None, + dialog_settings: Optional[dict] = None, + assistant_id: Optional[str] = None, + workspace_id: Optional[str] = None, + environment_id: Optional[str] = None, + valid: Optional[bool] = None, + next_snapshot_version: Optional[str] = None, + search_settings: Optional['SearchSettings'] = None, + warnings: Optional[List['SearchSkillWarning']] = None, + ) -> None: + """ + Initialize a Skill object. + + :param str language: The language of the skill. + :param str type: The type of skill. + :param str name: (optional) The name of the skill. This string cannot + contain carriage return, newline, or tab characters. + :param str description: (optional) The description of the skill. This + string cannot contain carriage return, newline, or tab characters. + :param dict workspace: (optional) An object containing the conversational + content of an action or dialog skill. + :param dict dialog_settings: (optional) For internal use only. + :param SearchSettings search_settings: (optional) An object describing the + search skill configuration. + **Note:** Search settings are not supported in **Import skills** requests, + and are not included in **Export skills** responses. 
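+
+        Only `language` and `type` are required; every other property is
+        keyword-only. Example (an illustrative sketch; the values are
+        placeholders)::
+
+            skill = Skill('en', 'action', name='My action skill')
+            print(skill.to_dict())
+            # {'name': 'My action skill', 'language': 'en', 'type': 'action'}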
+ """ + self.name = name + self.description = description + self.workspace = workspace + self.skill_id = skill_id + self.status = status + self.status_errors = status_errors + self.status_description = status_description + self.dialog_settings = dialog_settings + self.assistant_id = assistant_id + self.workspace_id = workspace_id + self.environment_id = environment_id + self.valid = valid + self.next_snapshot_version = next_snapshot_version + self.search_settings = search_settings + self.warnings = warnings + self.language = language + self.type = type + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Skill': + """Initialize a Skill object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + if (description := _dict.get('description')) is not None: + args['description'] = description + if (workspace := _dict.get('workspace')) is not None: + args['workspace'] = workspace + if (skill_id := _dict.get('skill_id')) is not None: + args['skill_id'] = skill_id + if (status := _dict.get('status')) is not None: + args['status'] = status + if (status_errors := _dict.get('status_errors')) is not None: + args['status_errors'] = [ + StatusError.from_dict(v) for v in status_errors + ] + if (status_description := _dict.get('status_description')) is not None: + args['status_description'] = status_description + if (dialog_settings := _dict.get('dialog_settings')) is not None: + args['dialog_settings'] = dialog_settings + if (assistant_id := _dict.get('assistant_id')) is not None: + args['assistant_id'] = assistant_id + if (workspace_id := _dict.get('workspace_id')) is not None: + args['workspace_id'] = workspace_id + if (environment_id := _dict.get('environment_id')) is not None: + args['environment_id'] = environment_id + if (valid := _dict.get('valid')) is not None: + args['valid'] = valid + if (next_snapshot_version := + _dict.get('next_snapshot_version')) is not None: + args['next_snapshot_version'] = next_snapshot_version + if (search_settings := _dict.get('search_settings')) is not None: + args['search_settings'] = SearchSettings.from_dict(search_settings) + if (warnings := _dict.get('warnings')) is not None: + args['warnings'] = [ + SearchSkillWarning.from_dict(v) for v in warnings + ] + if (language := _dict.get('language')) is not None: + args['language'] = language + else: + raise ValueError( + 'Required property \'language\' not present in Skill JSON') + if (type := _dict.get('type')) is not None: + args['type'] = type + else: + raise ValueError( + 'Required property \'type\' not present in Skill JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Skill object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'workspace') and self.workspace is not None: + _dict['workspace'] = self.workspace + if hasattr(self, 'skill_id') and getattr(self, 'skill_id') is not None: + _dict['skill_id'] = getattr(self, 'skill_id') + if hasattr(self, 'status') and getattr(self, 'status') is not None: + _dict['status'] = getattr(self, 'status') + if hasattr(self, 'status_errors') and getattr( + self, 'status_errors') is not None: + status_errors_list = [] + for v in getattr(self, 'status_errors'): + if 
isinstance(v, dict): + status_errors_list.append(v) + else: + status_errors_list.append(v.to_dict()) + _dict['status_errors'] = status_errors_list + if hasattr(self, 'status_description') and getattr( + self, 'status_description') is not None: + _dict['status_description'] = getattr(self, 'status_description') + if hasattr(self, + 'dialog_settings') and self.dialog_settings is not None: + _dict['dialog_settings'] = self.dialog_settings + if hasattr(self, 'assistant_id') and getattr( + self, 'assistant_id') is not None: + _dict['assistant_id'] = getattr(self, 'assistant_id') + if hasattr(self, 'workspace_id') and getattr( + self, 'workspace_id') is not None: + _dict['workspace_id'] = getattr(self, 'workspace_id') + if hasattr(self, 'environment_id') and getattr( + self, 'environment_id') is not None: + _dict['environment_id'] = getattr(self, 'environment_id') + if hasattr(self, 'valid') and getattr(self, 'valid') is not None: + _dict['valid'] = getattr(self, 'valid') + if hasattr(self, 'next_snapshot_version') and getattr( + self, 'next_snapshot_version') is not None: + _dict['next_snapshot_version'] = getattr(self, + 'next_snapshot_version') + if hasattr(self, + 'search_settings') and self.search_settings is not None: + if isinstance(self.search_settings, dict): + _dict['search_settings'] = self.search_settings + else: + _dict['search_settings'] = self.search_settings.to_dict() + if hasattr(self, 'warnings') and getattr(self, 'warnings') is not None: + warnings_list = [] + for v in getattr(self, 'warnings'): + if isinstance(v, dict): + warnings_list.append(v) + else: + warnings_list.append(v.to_dict()) + _dict['warnings'] = warnings_list + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Skill object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Skill') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Skill') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The current status of the skill: + - **Available**: The skill is available and ready to process messages. + - **Failed**: An asynchronous operation has failed. See the **status_errors** + property for more information about the cause of the failure. + - **Non Existent**: The skill does not exist. + - **Processing**: An asynchronous operation has not yet completed. + - **Training**: The skill is training based on new data. + """ + + AVAILABLE = 'Available' + FAILED = 'Failed' + NON_EXISTENT = 'Non Existent' + PROCESSING = 'Processing' + TRAINING = 'Training' + UNAVAILABLE = 'Unavailable' + + class TypeEnum(str, Enum): + """ + The type of skill. + """ + + ACTION = 'action' + DIALOG = 'dialog' + SEARCH = 'search' + + +class SkillImport: + """ + SkillImport. + + :param str name: (optional) The name of the skill. This string cannot contain + carriage return, newline, or tab characters. + :param str description: (optional) The description of the skill. This string + cannot contain carriage return, newline, or tab characters. 
+ :param dict workspace: (optional) An object containing the conversational + content of an action or dialog skill. + :param str skill_id: (optional) The skill ID of the skill. + :param str status: (optional) The current status of the skill: + - **Available**: The skill is available and ready to process messages. + - **Failed**: An asynchronous operation has failed. See the **status_errors** + property for more information about the cause of the failure. + - **Non Existent**: The skill does not exist. + - **Processing**: An asynchronous operation has not yet completed. + - **Training**: The skill is training based on new data. + :param List[StatusError] status_errors: (optional) An array of messages about + errors that caused an asynchronous operation to fail. Included only if + **status**=`Failed`. + :param str status_description: (optional) The description of the failed + asynchronous operation. Included only if **status**=`Failed`. + :param dict dialog_settings: (optional) For internal use only. + :param str assistant_id: (optional) The unique identifier of the assistant the + skill is associated with. + :param str workspace_id: (optional) The unique identifier of the workspace that + contains the skill content. Included only for action and dialog skills. + :param str environment_id: (optional) The unique identifier of the environment + where the skill is defined. For action and dialog skills, this is always the + draft environment. + :param bool valid: (optional) Whether the skill is structurally valid. + :param str next_snapshot_version: (optional) The name that will be given to the + next snapshot that is created for the skill. A snapshot of each versionable + skill is saved for each new release of an assistant. + :param SearchSettings search_settings: (optional) An object describing the + search skill configuration. + **Note:** Search settings are not supported in **Import skills** requests, and + are not included in **Export skills** responses. + :param List[SearchSkillWarning] warnings: (optional) An array of warnings + describing errors with the search skill configuration. Included only for search + skills. + :param str language: The language of the skill. + :param str type: The type of skill. + """ + + def __init__( + self, + language: str, + type: str, + *, + name: Optional[str] = None, + description: Optional[str] = None, + workspace: Optional[dict] = None, + skill_id: Optional[str] = None, + status: Optional[str] = None, + status_errors: Optional[List['StatusError']] = None, + status_description: Optional[str] = None, + dialog_settings: Optional[dict] = None, + assistant_id: Optional[str] = None, + workspace_id: Optional[str] = None, + environment_id: Optional[str] = None, + valid: Optional[bool] = None, + next_snapshot_version: Optional[str] = None, + search_settings: Optional['SearchSettings'] = None, + warnings: Optional[List['SearchSkillWarning']] = None, + ) -> None: + """ + Initialize a SkillImport object. + + :param str language: The language of the skill. + :param str type: The type of skill. + :param str name: (optional) The name of the skill. This string cannot + contain carriage return, newline, or tab characters. + :param str description: (optional) The description of the skill. This + string cannot contain carriage return, newline, or tab characters. + :param dict workspace: (optional) An object containing the conversational + content of an action or dialog skill. + :param dict dialog_settings: (optional) For internal use only. 
+ :param SearchSettings search_settings: (optional) An object describing the + search skill configuration. + **Note:** Search settings are not supported in **Import skills** requests, + and are not included in **Export skills** responses. + """ + self.name = name + self.description = description + self.workspace = workspace + self.skill_id = skill_id + self.status = status + self.status_errors = status_errors + self.status_description = status_description + self.dialog_settings = dialog_settings + self.assistant_id = assistant_id + self.workspace_id = workspace_id + self.environment_id = environment_id + self.valid = valid + self.next_snapshot_version = next_snapshot_version + self.search_settings = search_settings + self.warnings = warnings + self.language = language + self.type = type + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SkillImport': + """Initialize a SkillImport object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + if (description := _dict.get('description')) is not None: + args['description'] = description + if (workspace := _dict.get('workspace')) is not None: + args['workspace'] = workspace + if (skill_id := _dict.get('skill_id')) is not None: + args['skill_id'] = skill_id + if (status := _dict.get('status')) is not None: + args['status'] = status + if (status_errors := _dict.get('status_errors')) is not None: + args['status_errors'] = [ + StatusError.from_dict(v) for v in status_errors + ] + if (status_description := _dict.get('status_description')) is not None: + args['status_description'] = status_description + if (dialog_settings := _dict.get('dialog_settings')) is not None: + args['dialog_settings'] = dialog_settings + if (assistant_id := _dict.get('assistant_id')) is not None: + args['assistant_id'] = assistant_id + if (workspace_id := _dict.get('workspace_id')) is not None: + args['workspace_id'] = workspace_id + if (environment_id := _dict.get('environment_id')) is not None: + args['environment_id'] = environment_id + if (valid := _dict.get('valid')) is not None: + args['valid'] = valid + if (next_snapshot_version := + _dict.get('next_snapshot_version')) is not None: + args['next_snapshot_version'] = next_snapshot_version + if (search_settings := _dict.get('search_settings')) is not None: + args['search_settings'] = SearchSettings.from_dict(search_settings) + if (warnings := _dict.get('warnings')) is not None: + args['warnings'] = [ + SearchSkillWarning.from_dict(v) for v in warnings + ] + if (language := _dict.get('language')) is not None: + args['language'] = language + else: + raise ValueError( + 'Required property \'language\' not present in SkillImport JSON' + ) + if (type := _dict.get('type')) is not None: + args['type'] = type + else: + raise ValueError( + 'Required property \'type\' not present in SkillImport JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SkillImport object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'workspace') and self.workspace is not None: + _dict['workspace'] = self.workspace + if hasattr(self, 'skill_id') and getattr(self, 'skill_id') is not None: + _dict['skill_id'] = getattr(self, 'skill_id') + if 
hasattr(self, 'status') and getattr(self, 'status') is not None: + _dict['status'] = getattr(self, 'status') + if hasattr(self, 'status_errors') and getattr( + self, 'status_errors') is not None: + status_errors_list = [] + for v in getattr(self, 'status_errors'): + if isinstance(v, dict): + status_errors_list.append(v) + else: + status_errors_list.append(v.to_dict()) + _dict['status_errors'] = status_errors_list + if hasattr(self, 'status_description') and getattr( + self, 'status_description') is not None: + _dict['status_description'] = getattr(self, 'status_description') + if hasattr(self, + 'dialog_settings') and self.dialog_settings is not None: + _dict['dialog_settings'] = self.dialog_settings + if hasattr(self, 'assistant_id') and getattr( + self, 'assistant_id') is not None: + _dict['assistant_id'] = getattr(self, 'assistant_id') + if hasattr(self, 'workspace_id') and getattr( + self, 'workspace_id') is not None: + _dict['workspace_id'] = getattr(self, 'workspace_id') + if hasattr(self, 'environment_id') and getattr( + self, 'environment_id') is not None: + _dict['environment_id'] = getattr(self, 'environment_id') + if hasattr(self, 'valid') and getattr(self, 'valid') is not None: + _dict['valid'] = getattr(self, 'valid') + if hasattr(self, 'next_snapshot_version') and getattr( + self, 'next_snapshot_version') is not None: + _dict['next_snapshot_version'] = getattr(self, + 'next_snapshot_version') + if hasattr(self, + 'search_settings') and self.search_settings is not None: + if isinstance(self.search_settings, dict): + _dict['search_settings'] = self.search_settings + else: + _dict['search_settings'] = self.search_settings.to_dict() + if hasattr(self, 'warnings') and getattr(self, 'warnings') is not None: + warnings_list = [] + for v in getattr(self, 'warnings'): + if isinstance(v, dict): + warnings_list.append(v) + else: + warnings_list.append(v.to_dict()) + _dict['warnings'] = warnings_list + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SkillImport object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SkillImport') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SkillImport') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The current status of the skill: + - **Available**: The skill is available and ready to process messages. + - **Failed**: An asynchronous operation has failed. See the **status_errors** + property for more information about the cause of the failure. + - **Non Existent**: The skill does not exist. + - **Processing**: An asynchronous operation has not yet completed. + - **Training**: The skill is training based on new data. + """ + + AVAILABLE = 'Available' + FAILED = 'Failed' + NON_EXISTENT = 'Non Existent' + PROCESSING = 'Processing' + TRAINING = 'Training' + UNAVAILABLE = 'Unavailable' + + class TypeEnum(str, Enum): + """ + The type of skill. 
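+
+        Because the enum also subclasses `str`, its members compare equal to
+        the plain strings used in API payloads. Example (an illustrative
+        sketch; the payload values are placeholders)::
+
+            skill_import = SkillImport.from_dict({'language': 'en', 'type': 'dialog'})
+            if skill_import.type == SkillImport.TypeEnum.DIALOG:
+                print('This is a dialog skill')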
+ """ + + ACTION = 'action' + DIALOG = 'dialog' + + +class SkillsAsyncRequestStatus: + """ + SkillsAsyncRequestStatus. + + :param str assistant_id: (optional) The assistant ID of the assistant. + :param str status: (optional) The current status of the asynchronous operation: + - `Available`: An asynchronous export is available. + - `Completed`: An asynchronous import operation has completed successfully. + - `Failed`: An asynchronous operation has failed. See the **status_errors** + property for more information about the cause of the failure. + - `Processing`: An asynchronous operation has not yet completed. + :param str status_description: (optional) The description of the failed + asynchronous operation. Included only if **status**=`Failed`. + :param List[StatusError] status_errors: (optional) An array of messages about + errors that caused an asynchronous operation to fail. Included only if + **status**=`Failed`. + """ + + def __init__( + self, + *, + assistant_id: Optional[str] = None, + status: Optional[str] = None, + status_description: Optional[str] = None, + status_errors: Optional[List['StatusError']] = None, + ) -> None: + """ + Initialize a SkillsAsyncRequestStatus object. + + """ + self.assistant_id = assistant_id + self.status = status + self.status_description = status_description + self.status_errors = status_errors + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SkillsAsyncRequestStatus': + """Initialize a SkillsAsyncRequestStatus object from a json dictionary.""" + args = {} + if (assistant_id := _dict.get('assistant_id')) is not None: + args['assistant_id'] = assistant_id + if (status := _dict.get('status')) is not None: + args['status'] = status + if (status_description := _dict.get('status_description')) is not None: + args['status_description'] = status_description + if (status_errors := _dict.get('status_errors')) is not None: + args['status_errors'] = [ + StatusError.from_dict(v) for v in status_errors + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SkillsAsyncRequestStatus object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'assistant_id') and getattr( + self, 'assistant_id') is not None: + _dict['assistant_id'] = getattr(self, 'assistant_id') + if hasattr(self, 'status') and getattr(self, 'status') is not None: + _dict['status'] = getattr(self, 'status') + if hasattr(self, 'status_description') and getattr( + self, 'status_description') is not None: + _dict['status_description'] = getattr(self, 'status_description') + if hasattr(self, 'status_errors') and getattr( + self, 'status_errors') is not None: + status_errors_list = [] + for v in getattr(self, 'status_errors'): + if isinstance(v, dict): + status_errors_list.append(v) + else: + status_errors_list.append(v.to_dict()) + _dict['status_errors'] = status_errors_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SkillsAsyncRequestStatus object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SkillsAsyncRequestStatus') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SkillsAsyncRequestStatus') -> bool: + 
"""Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The current status of the asynchronous operation: + - `Available`: An asynchronous export is available. + - `Completed`: An asynchronous import operation has completed successfully. + - `Failed`: An asynchronous operation has failed. See the **status_errors** + property for more information about the cause of the failure. + - `Processing`: An asynchronous operation has not yet completed. + """ + + AVAILABLE = 'Available' + COMPLETED = 'Completed' + FAILED = 'Failed' + PROCESSING = 'Processing' + + +class SkillsExport: + """ + SkillsExport. + + :param List[Skill] assistant_skills: An array of objects describing the skills + for the assistant. Included in responses only if **status**=`Available`. + :param AssistantState assistant_state: Status information about the skills for + the assistant. Included in responses only if **status**=`Available`. + """ + + def __init__( + self, + assistant_skills: List['Skill'], + assistant_state: 'AssistantState', + ) -> None: + """ + Initialize a SkillsExport object. + + :param List[Skill] assistant_skills: An array of objects describing the + skills for the assistant. Included in responses only if + **status**=`Available`. + :param AssistantState assistant_state: Status information about the skills + for the assistant. Included in responses only if **status**=`Available`. + """ + self.assistant_skills = assistant_skills + self.assistant_state = assistant_state + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SkillsExport': + """Initialize a SkillsExport object from a json dictionary.""" + args = {} + if (assistant_skills := _dict.get('assistant_skills')) is not None: + args['assistant_skills'] = [ + Skill.from_dict(v) for v in assistant_skills + ] + else: + raise ValueError( + 'Required property \'assistant_skills\' not present in SkillsExport JSON' + ) + if (assistant_state := _dict.get('assistant_state')) is not None: + args['assistant_state'] = AssistantState.from_dict(assistant_state) + else: + raise ValueError( + 'Required property \'assistant_state\' not present in SkillsExport JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SkillsExport object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'assistant_skills') and self.assistant_skills is not None: + assistant_skills_list = [] + for v in self.assistant_skills: + if isinstance(v, dict): + assistant_skills_list.append(v) + else: + assistant_skills_list.append(v.to_dict()) + _dict['assistant_skills'] = assistant_skills_list + if hasattr(self, + 'assistant_state') and self.assistant_state is not None: + if isinstance(self.assistant_state, dict): + _dict['assistant_state'] = self.assistant_state + else: + _dict['assistant_state'] = self.assistant_state.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SkillsExport object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SkillsExport') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SkillsExport') -> bool: 
+ """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class StatefulMessageResponse: + """ + A response from the watsonx Assistant service. + + :param MessageOutput output: Assistant output to be rendered or processed by the + client. + :param MessageContext context: (optional) Context data for the conversation. You + can use this property to access context variables. The context is stored by the + assistant on a per-session basis. + **Note:** The context is included in message responses only if + **return_context**=`true` in the message request. Full context is always + included in logs. + :param str user_id: A string value that identifies the user who is interacting + with the assistant. The client must provide a unique identifier for each + individual end user who accesses the application. For user-based plans, this + user ID is used to identify unique users for billing purposes. This string + cannot contain carriage return, newline, or tab characters. If no value is + specified in the input, **user_id** is automatically set to the value of + **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the global + system context. + :param MessageOutput masked_output: (optional) Assistant output to be rendered + or processed by the client. All private data is masked or removed. + :param MessageInput masked_input: (optional) An input object that includes the + input text. All private data is masked or removed. + """ + + def __init__( + self, + output: 'MessageOutput', + user_id: str, + *, + context: Optional['MessageContext'] = None, + masked_output: Optional['MessageOutput'] = None, + masked_input: Optional['MessageInput'] = None, + ) -> None: + """ + Initialize a StatefulMessageResponse object. + + :param MessageOutput output: Assistant output to be rendered or processed + by the client. + :param str user_id: A string value that identifies the user who is + interacting with the assistant. The client must provide a unique identifier + for each individual end user who accesses the application. For user-based + plans, this user ID is used to identify unique users for billing purposes. + This string cannot contain carriage return, newline, or tab characters. If + no value is specified in the input, **user_id** is automatically set to the + value of **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the + global system context. + :param MessageContext context: (optional) Context data for the + conversation. You can use this property to access context variables. The + context is stored by the assistant on a per-session basis. + **Note:** The context is included in message responses only if + **return_context**=`true` in the message request. Full context is always + included in logs. + :param MessageOutput masked_output: (optional) Assistant output to be + rendered or processed by the client. All private data is masked or removed. + :param MessageInput masked_input: (optional) An input object that includes + the input text. All private data is masked or removed. 
+ """ + self.output = output + self.context = context + self.user_id = user_id + self.masked_output = masked_output + self.masked_input = masked_input + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatefulMessageResponse': + """Initialize a StatefulMessageResponse object from a json dictionary.""" + args = {} + if (output := _dict.get('output')) is not None: + args['output'] = MessageOutput.from_dict(output) + else: + raise ValueError( + 'Required property \'output\' not present in StatefulMessageResponse JSON' + ) + if (context := _dict.get('context')) is not None: + args['context'] = MessageContext.from_dict(context) + if (user_id := _dict.get('user_id')) is not None: + args['user_id'] = user_id + else: + raise ValueError( + 'Required property \'user_id\' not present in StatefulMessageResponse JSON' + ) + if (masked_output := _dict.get('masked_output')) is not None: + args['masked_output'] = MessageOutput.from_dict(masked_output) + if (masked_input := _dict.get('masked_input')) is not None: + args['masked_input'] = MessageInput.from_dict(masked_input) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatefulMessageResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'output') and self.output is not None: + if isinstance(self.output, dict): + _dict['output'] = self.output + else: + _dict['output'] = self.output.to_dict() + if hasattr(self, 'context') and self.context is not None: + if isinstance(self.context, dict): + _dict['context'] = self.context + else: + _dict['context'] = self.context.to_dict() + if hasattr(self, 'user_id') and self.user_id is not None: + _dict['user_id'] = self.user_id + if hasattr(self, 'masked_output') and self.masked_output is not None: + if isinstance(self.masked_output, dict): + _dict['masked_output'] = self.masked_output + else: + _dict['masked_output'] = self.masked_output.to_dict() + if hasattr(self, 'masked_input') and self.masked_input is not None: + if isinstance(self.masked_input, dict): + _dict['masked_input'] = self.masked_input + else: + _dict['masked_input'] = self.masked_input.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatefulMessageResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatefulMessageResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatefulMessageResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class StatelessFinalResponse: + """ + Message final response content. + + :param StatelessFinalResponseOutput output: (optional) Assistant output to be + rendered or processed by the client. + :param StatelessMessageContext context: (optional) Context data for the + conversation. You can use this property to access context variables. The context + is stored by the assistant on a per-session basis. + **Note:** The context is included in message responses only if + **return_context**=`true` in the message request. Full context is always + included in logs. 
+ :param str user_id: (optional) A string value that identifies the user who is + interacting with the assistant. The client must provide a unique identifier for + each individual end user who accesses the application. For user-based plans, + this user ID is used to identify unique users for billing purposes. This string + cannot contain carriage return, newline, or tab characters. If no value is + specified in the input, **user_id** is automatically set to the value of + **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the global + system context. + """ + + def __init__( + self, + *, + output: Optional['StatelessFinalResponseOutput'] = None, + context: Optional['StatelessMessageContext'] = None, + user_id: Optional[str] = None, + ) -> None: + """ + Initialize a StatelessFinalResponse object. + + :param StatelessFinalResponseOutput output: (optional) Assistant output to + be rendered or processed by the client. + :param StatelessMessageContext context: (optional) Context data for the + conversation. You can use this property to access context variables. The + context is stored by the assistant on a per-session basis. + **Note:** The context is included in message responses only if + **return_context**=`true` in the message request. Full context is always + included in logs. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the assistant. The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. If no value is specified in the input, **user_id** is + automatically set to the value of **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the + global system context. 
+ """ + self.output = output + self.context = context + self.user_id = user_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatelessFinalResponse': + """Initialize a StatelessFinalResponse object from a json dictionary.""" + args = {} + if (output := _dict.get('output')) is not None: + args['output'] = StatelessFinalResponseOutput.from_dict(output) + if (context := _dict.get('context')) is not None: + args['context'] = StatelessMessageContext.from_dict(context) + if (user_id := _dict.get('user_id')) is not None: + args['user_id'] = user_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessFinalResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'output') and self.output is not None: + if isinstance(self.output, dict): + _dict['output'] = self.output + else: + _dict['output'] = self.output.to_dict() + if hasattr(self, 'context') and self.context is not None: + if isinstance(self.context, dict): + _dict['context'] = self.context + else: + _dict['context'] = self.context.to_dict() + if hasattr(self, 'user_id') and self.user_id is not None: + _dict['user_id'] = self.user_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatelessFinalResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatelessFinalResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatelessFinalResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class StatelessFinalResponseOutput: + """ + Assistant output to be rendered or processed by the client. + + :param List[RuntimeResponseGeneric] generic: (optional) Output intended for any + channel. It is the responsibility of the client application to implement the + supported response types. + :param List[RuntimeIntent] intents: (optional) An array of intents recognized in + the user input, sorted in descending order of confidence. + :param List[RuntimeEntity] entities: (optional) An array of entities identified + in the user input. + :param List[DialogNodeAction] actions: (optional) An array of objects describing + any actions requested by the dialog node. + :param MessageOutputDebug debug: (optional) Additional detailed information + about a message response and how it was generated. + :param dict user_defined: (optional) An object containing any custom properties + included in the response. This object includes any arbitrary properties defined + in the dialog JSON editor as part of the dialog node output. + :param MessageOutputSpelling spelling: (optional) Properties describing any + spelling corrections in the user input that was received. + :param List[MessageOutputLLMMetadata] llm_metadata: (optional) An array of + objects that provide information about calls to large language models that + occured as part of handling this message. 
+ :param StatelessMessageContext streaming_metadata: + """ + + def __init__( + self, + streaming_metadata: 'StatelessMessageContext', + *, + generic: Optional[List['RuntimeResponseGeneric']] = None, + intents: Optional[List['RuntimeIntent']] = None, + entities: Optional[List['RuntimeEntity']] = None, + actions: Optional[List['DialogNodeAction']] = None, + debug: Optional['MessageOutputDebug'] = None, + user_defined: Optional[dict] = None, + spelling: Optional['MessageOutputSpelling'] = None, + llm_metadata: Optional[List['MessageOutputLLMMetadata']] = None, + ) -> None: + """ + Initialize a StatelessFinalResponseOutput object. + + :param StatelessMessageContext streaming_metadata: + :param List[RuntimeResponseGeneric] generic: (optional) Output intended for + any channel. It is the responsibility of the client application to + implement the supported response types. + :param List[RuntimeIntent] intents: (optional) An array of intents + recognized in the user input, sorted in descending order of confidence. + :param List[RuntimeEntity] entities: (optional) An array of entities + identified in the user input. + :param List[DialogNodeAction] actions: (optional) An array of objects + describing any actions requested by the dialog node. + :param MessageOutputDebug debug: (optional) Additional detailed information + about a message response and how it was generated. + :param dict user_defined: (optional) An object containing any custom + properties included in the response. This object includes any arbitrary + properties defined in the dialog JSON editor as part of the dialog node + output. + :param MessageOutputSpelling spelling: (optional) Properties describing any + spelling corrections in the user input that was received. + :param List[MessageOutputLLMMetadata] llm_metadata: (optional) An array of + objects that provide information about calls to large language models that + occurred as part of handling this message.
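+
+        `streaming_metadata` is the only required property; `from_dict` raises
+        `ValueError` when it is missing. Example (an illustrative sketch; an
+        empty object is used as placeholder streaming metadata)::
+
+            output = StatelessFinalResponseOutput.from_dict(
+                {'streaming_metadata': {}})
+            print(output.streaming_metadata.to_dict())  # {}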
+ """ + self.generic = generic + self.intents = intents + self.entities = entities + self.actions = actions + self.debug = debug + self.user_defined = user_defined + self.spelling = spelling + self.llm_metadata = llm_metadata + self.streaming_metadata = streaming_metadata + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatelessFinalResponseOutput': + """Initialize a StatelessFinalResponseOutput object from a json dictionary.""" + args = {} + if (generic := _dict.get('generic')) is not None: + args['generic'] = [ + RuntimeResponseGeneric.from_dict(v) for v in generic + ] + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + if (actions := _dict.get('actions')) is not None: + args['actions'] = [DialogNodeAction.from_dict(v) for v in actions] + if (debug := _dict.get('debug')) is not None: + args['debug'] = MessageOutputDebug.from_dict(debug) + if (user_defined := _dict.get('user_defined')) is not None: + args['user_defined'] = user_defined + if (spelling := _dict.get('spelling')) is not None: + args['spelling'] = MessageOutputSpelling.from_dict(spelling) + if (llm_metadata := _dict.get('llm_metadata')) is not None: + args['llm_metadata'] = [ + MessageOutputLLMMetadata.from_dict(v) for v in llm_metadata + ] + if (streaming_metadata := _dict.get('streaming_metadata')) is not None: + args['streaming_metadata'] = StatelessMessageContext.from_dict( + streaming_metadata) + else: + raise ValueError( + 'Required property \'streaming_metadata\' not present in StatelessFinalResponseOutput JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessFinalResponseOutput object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'generic') and self.generic is not None: + generic_list = [] + for v in self.generic: + if isinstance(v, dict): + generic_list.append(v) + else: + generic_list.append(v.to_dict()) + _dict['generic'] = generic_list + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'actions') and self.actions is not None: + actions_list = [] + for v in self.actions: + if isinstance(v, dict): + actions_list.append(v) + else: + actions_list.append(v.to_dict()) + _dict['actions'] = actions_list + if hasattr(self, 'debug') and self.debug is not None: + if isinstance(self.debug, dict): + _dict['debug'] = self.debug + else: + _dict['debug'] = self.debug.to_dict() + if hasattr(self, 'user_defined') and self.user_defined is not None: + _dict['user_defined'] = self.user_defined + if hasattr(self, 'spelling') and self.spelling is not None: + if isinstance(self.spelling, dict): + _dict['spelling'] = self.spelling + else: + _dict['spelling'] = self.spelling.to_dict() + if hasattr(self, 'llm_metadata') and self.llm_metadata is not None: + llm_metadata_list = [] + for v in self.llm_metadata: + if isinstance(v, dict): + 
llm_metadata_list.append(v) + else: + llm_metadata_list.append(v.to_dict()) + _dict['llm_metadata'] = llm_metadata_list + if hasattr( + self, + 'streaming_metadata') and self.streaming_metadata is not None: + if isinstance(self.streaming_metadata, dict): + _dict['streaming_metadata'] = self.streaming_metadata + else: + _dict['streaming_metadata'] = self.streaming_metadata.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatelessFinalResponseOutput object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatelessFinalResponseOutput') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatelessFinalResponseOutput') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class StatelessMessageContext: + """ + StatelessMessageContext. + + :param StatelessMessageContextGlobal global_: (optional) Session context data + that is shared by all skills used by the assistant. + :param StatelessMessageContextSkills skills: (optional) Context data specific to + particular skills used by the assistant. + :param dict integrations: (optional) An object containing context data that is + specific to particular integrations. For more information, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-dialog-integrations). + """ + + def __init__( + self, + *, + global_: Optional['StatelessMessageContextGlobal'] = None, + skills: Optional['StatelessMessageContextSkills'] = None, + integrations: Optional[dict] = None, + ) -> None: + """ + Initialize a StatelessMessageContext object. + + :param StatelessMessageContextGlobal global_: (optional) Session context + data that is shared by all skills used by the assistant. + :param StatelessMessageContextSkills skills: (optional) Context data + specific to particular skills used by the assistant. + :param dict integrations: (optional) An object containing context data that + is specific to particular integrations. For more information, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-dialog-integrations). 
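+
+        Note that the Python attribute is named `global_` (with a trailing
+        underscore, because `global` is a reserved word in Python), while the
+        serialized JSON key is `global`. Example (an illustrative sketch; the
+        session ID is a placeholder)::
+
+            context = StatelessMessageContext.from_dict(
+                {'global': {'session_id': 'my-session-id'}})
+            print(context.global_.session_id)  # 'my-session-id'
+            print(context.to_dict())  # {'global': {'session_id': 'my-session-id'}}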
+ """ + self.global_ = global_ + self.skills = skills + self.integrations = integrations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatelessMessageContext': + """Initialize a StatelessMessageContext object from a json dictionary.""" + args = {} + if (global_ := _dict.get('global')) is not None: + args['global_'] = StatelessMessageContextGlobal.from_dict(global_) + if (skills := _dict.get('skills')) is not None: + args['skills'] = StatelessMessageContextSkills.from_dict(skills) + if (integrations := _dict.get('integrations')) is not None: + args['integrations'] = integrations + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessMessageContext object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'global_') and self.global_ is not None: + if isinstance(self.global_, dict): + _dict['global'] = self.global_ + else: + _dict['global'] = self.global_.to_dict() + if hasattr(self, 'skills') and self.skills is not None: + if isinstance(self.skills, dict): + _dict['skills'] = self.skills + else: + _dict['skills'] = self.skills.to_dict() + if hasattr(self, 'integrations') and self.integrations is not None: + _dict['integrations'] = self.integrations + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatelessMessageContext object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatelessMessageContext') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatelessMessageContext') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class StatelessMessageContextGlobal: + """ + Session context data that is shared by all skills used by the assistant. + + :param MessageContextGlobalSystem system: (optional) Built-in system properties + that apply to all skills used by the assistant. + :param str session_id: (optional) The unique identifier of the session. + """ + + def __init__( + self, + *, + system: Optional['MessageContextGlobalSystem'] = None, + session_id: Optional[str] = None, + ) -> None: + """ + Initialize a StatelessMessageContextGlobal object. + + :param MessageContextGlobalSystem system: (optional) Built-in system + properties that apply to all skills used by the assistant. + :param str session_id: (optional) The unique identifier of the session. 
+ """ + self.system = system + self.session_id = session_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatelessMessageContextGlobal': + """Initialize a StatelessMessageContextGlobal object from a json dictionary.""" + args = {} + if (system := _dict.get('system')) is not None: + args['system'] = MessageContextGlobalSystem.from_dict(system) + if (session_id := _dict.get('session_id')) is not None: + args['session_id'] = session_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessMessageContextGlobal object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'system') and self.system is not None: + if isinstance(self.system, dict): + _dict['system'] = self.system + else: + _dict['system'] = self.system.to_dict() + if hasattr(self, 'session_id') and self.session_id is not None: + _dict['session_id'] = self.session_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatelessMessageContextGlobal object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatelessMessageContextGlobal') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatelessMessageContextGlobal') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class StatelessMessageContextSkills: + """ + Context data specific to particular skills used by the assistant. + + :param MessageContextDialogSkill main_skill: (optional) Context variables that + are used by the dialog skill. + :param StatelessMessageContextSkillsActionsSkill actions_skill: (optional) + Context variables that are used by the action skill. + """ + + def __init__( + self, + *, + main_skill: Optional['MessageContextDialogSkill'] = None, + actions_skill: Optional[ + 'StatelessMessageContextSkillsActionsSkill'] = None, + ) -> None: + """ + Initialize a StatelessMessageContextSkills object. + + :param MessageContextDialogSkill main_skill: (optional) Context variables + that are used by the dialog skill. + :param StatelessMessageContextSkillsActionsSkill actions_skill: (optional) + Context variables that are used by the action skill. 
+ """ + self.main_skill = main_skill + self.actions_skill = actions_skill + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatelessMessageContextSkills': + """Initialize a StatelessMessageContextSkills object from a json dictionary.""" + args = {} + if (main_skill := _dict.get('main skill')) is not None: + args['main_skill'] = MessageContextDialogSkill.from_dict(main_skill) + if (actions_skill := _dict.get('actions skill')) is not None: + args[ + 'actions_skill'] = StatelessMessageContextSkillsActionsSkill.from_dict( + actions_skill) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessMessageContextSkills object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'main_skill') and self.main_skill is not None: + if isinstance(self.main_skill, dict): + _dict['main skill'] = self.main_skill + else: + _dict['main skill'] = self.main_skill.to_dict() + if hasattr(self, 'actions_skill') and self.actions_skill is not None: + if isinstance(self.actions_skill, dict): + _dict['actions skill'] = self.actions_skill + else: + _dict['actions skill'] = self.actions_skill.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatelessMessageContextSkills object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatelessMessageContextSkills') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatelessMessageContextSkills') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class StatelessMessageContextSkillsActionsSkill: + """ + Context variables that are used by the action skill. + + :param dict user_defined: (optional) An object containing any arbitrary + variables that can be read and written by a particular skill. + :param MessageContextSkillSystem system: (optional) System context data used by + the skill. + :param dict action_variables: (optional) An object containing action variables. + Action variables can be accessed only by steps in the same action, and do not + persist after the action ends. + :param dict skill_variables: (optional) An object containing skill variables. + (In the watsonx Assistant user interface, skill variables are called _session + variables_.) Skill variables can be accessed by any action and persist for the + duration of the session. + :param dict private_action_variables: (optional) An object containing private + action variables. Action variables can be accessed only by steps in the same + action, and do not persist after the action ends. Private variables are + encrypted. + :param dict private_skill_variables: (optional) An object containing private + skill variables. (In the watsonx Assistant user interface, skill variables are + called _session variables_.) Skill variables can be accessed by any action and + persist for the duration of the session. Private variables are encrypted. 
+ """ + + def __init__( + self, + *, + user_defined: Optional[dict] = None, + system: Optional['MessageContextSkillSystem'] = None, + action_variables: Optional[dict] = None, + skill_variables: Optional[dict] = None, + private_action_variables: Optional[dict] = None, + private_skill_variables: Optional[dict] = None, + ) -> None: + """ + Initialize a StatelessMessageContextSkillsActionsSkill object. + + :param dict user_defined: (optional) An object containing any arbitrary + variables that can be read and written by a particular skill. + :param MessageContextSkillSystem system: (optional) System context data + used by the skill. + :param dict action_variables: (optional) An object containing action + variables. Action variables can be accessed only by steps in the same + action, and do not persist after the action ends. + :param dict skill_variables: (optional) An object containing skill + variables. (In the watsonx Assistant user interface, skill variables are + called _session variables_.) Skill variables can be accessed by any action + and persist for the duration of the session. + :param dict private_action_variables: (optional) An object containing + private action variables. Action variables can be accessed only by steps in + the same action, and do not persist after the action ends. Private + variables are encrypted. + :param dict private_skill_variables: (optional) An object containing + private skill variables. (In the watsonx Assistant user interface, skill + variables are called _session variables_.) Skill variables can be accessed + by any action and persist for the duration of the session. Private + variables are encrypted. + """ + self.user_defined = user_defined + self.system = system + self.action_variables = action_variables + self.skill_variables = skill_variables + self.private_action_variables = private_action_variables + self.private_skill_variables = private_skill_variables + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'StatelessMessageContextSkillsActionsSkill': + """Initialize a StatelessMessageContextSkillsActionsSkill object from a json dictionary.""" + args = {} + if (user_defined := _dict.get('user_defined')) is not None: + args['user_defined'] = user_defined + if (system := _dict.get('system')) is not None: + args['system'] = MessageContextSkillSystem.from_dict(system) + if (action_variables := _dict.get('action_variables')) is not None: + args['action_variables'] = action_variables + if (skill_variables := _dict.get('skill_variables')) is not None: + args['skill_variables'] = skill_variables + if (private_action_variables := + _dict.get('private_action_variables')) is not None: + args['private_action_variables'] = private_action_variables + if (private_skill_variables := + _dict.get('private_skill_variables')) is not None: + args['private_skill_variables'] = private_skill_variables + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessMessageContextSkillsActionsSkill object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'user_defined') and self.user_defined is not None: + _dict['user_defined'] = self.user_defined + if hasattr(self, 'system') and self.system is not None: + if isinstance(self.system, dict): + _dict['system'] = self.system + else: + _dict['system'] = self.system.to_dict() + if hasattr(self, + 'action_variables') and self.action_variables is not None: + 
_dict['action_variables'] = self.action_variables + if hasattr(self, + 'skill_variables') and self.skill_variables is not None: + _dict['skill_variables'] = self.skill_variables + if hasattr(self, 'private_action_variables' + ) and self.private_action_variables is not None: + _dict['private_action_variables'] = self.private_action_variables + if hasattr(self, 'private_skill_variables' + ) and self.private_skill_variables is not None: + _dict['private_skill_variables'] = self.private_skill_variables + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatelessMessageContextSkillsActionsSkill object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'StatelessMessageContextSkillsActionsSkill') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'StatelessMessageContextSkillsActionsSkill') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class StatelessMessageInput: + """ + An input object that includes the input text. + + :param str message_type: (optional) The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or action skill is + bypassed.) + **Note:** A `search` message results in an error if no search skill is + configured for the assistant. + :param str text: (optional) The text of the user input. This string cannot + contain carriage return, newline, or tab characters. + :param List[RuntimeIntent] intents: (optional) Intents to use when evaluating + the user input. Include intents from the previous response to continue using + those intents rather than trying to recognize intents in the new input. + :param List[RuntimeEntity] entities: (optional) Entities to use when evaluating + the message. Include entities from the previous response to continue using those + entities rather than detecting entities in the new input. + :param str suggestion_id: (optional) For internal use only. + :param List[MessageInputAttachment] attachments: (optional) An array of + multimedia attachments to be sent with the message. Attachments are not + processed by the assistant itself, but can be sent to external services by + webhooks. + **Note:** Attachments are not supported on IBM Cloud Pak for Data. + :param RequestAnalytics analytics: (optional) An optional object containing + analytics data. Currently, this data is used only for events sent to the Segment + extension. + :param StatelessMessageInputOptions options: (optional) Optional properties that + control how the assistant responds. + """ + + def __init__( + self, + *, + message_type: Optional[str] = None, + text: Optional[str] = None, + intents: Optional[List['RuntimeIntent']] = None, + entities: Optional[List['RuntimeEntity']] = None, + suggestion_id: Optional[str] = None, + attachments: Optional[List['MessageInputAttachment']] = None, + analytics: Optional['RequestAnalytics'] = None, + options: Optional['StatelessMessageInputOptions'] = None, + ) -> None: + """ + Initialize a StatelessMessageInput object. + + :param str message_type: (optional) The type of the message: + - `text`: The user input is processed normally by the assistant. 
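# Editor's illustrative sketch (not part of the generated module): the actions
# skill context separates per-action, per-session, and encrypted ("private")
# variable buckets; all of them are plain dicts. Import path and sample values
# are assumptions.
from ibm_watson.assistant_v2 import StatelessMessageContextSkillsActionsSkill

actions_skill = StatelessMessageContextSkillsActionsSkill.from_dict({
    'action_variables': {'step_result': 42},        # visible only within the current action
    'skill_variables': {'customer_name': 'Sam'},    # persists for the whole session
    'private_action_variables': {'otp': '123456'},  # encrypted counterpart
})
assert actions_skill.to_dict()['skill_variables'] == {'customer_name': 'Sam'}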
+ - `search`: Only search results are returned. (Any dialog or action skill + is bypassed.) + **Note:** A `search` message results in an error if no search skill is + configured for the assistant. + :param str text: (optional) The text of the user input. This string cannot + contain carriage return, newline, or tab characters. + :param List[RuntimeIntent] intents: (optional) Intents to use when + evaluating the user input. Include intents from the previous response to + continue using those intents rather than trying to recognize intents in the + new input. + :param List[RuntimeEntity] entities: (optional) Entities to use when + evaluating the message. Include entities from the previous response to + continue using those entities rather than detecting entities in the new + input. + :param str suggestion_id: (optional) For internal use only. + :param List[MessageInputAttachment] attachments: (optional) An array of + multimedia attachments to be sent with the message. Attachments are not + processed by the assistant itself, but can be sent to external services by + webhooks. + **Note:** Attachments are not supported on IBM Cloud Pak for Data. + :param RequestAnalytics analytics: (optional) An optional object containing + analytics data. Currently, this data is used only for events sent to the + Segment extension. + :param StatelessMessageInputOptions options: (optional) Optional properties + that control how the assistant responds. + """ + self.message_type = message_type + self.text = text + self.intents = intents + self.entities = entities + self.suggestion_id = suggestion_id + self.attachments = attachments + self.analytics = analytics + self.options = options + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatelessMessageInput': + """Initialize a StatelessMessageInput object from a json dictionary.""" + args = {} + if (message_type := _dict.get('message_type')) is not None: + args['message_type'] = message_type + if (text := _dict.get('text')) is not None: + args['text'] = text + if (intents := _dict.get('intents')) is not None: + args['intents'] = [RuntimeIntent.from_dict(v) for v in intents] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RuntimeEntity.from_dict(v) for v in entities] + if (suggestion_id := _dict.get('suggestion_id')) is not None: + args['suggestion_id'] = suggestion_id + if (attachments := _dict.get('attachments')) is not None: + args['attachments'] = [ + MessageInputAttachment.from_dict(v) for v in attachments + ] + if (analytics := _dict.get('analytics')) is not None: + args['analytics'] = RequestAnalytics.from_dict(analytics) + if (options := _dict.get('options')) is not None: + args['options'] = StatelessMessageInputOptions.from_dict(options) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessMessageInput object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'message_type') and self.message_type is not None: + _dict['message_type'] = self.message_type + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'intents') and self.intents is not None: + intents_list = [] + for v in self.intents: + if isinstance(v, dict): + intents_list.append(v) + else: + intents_list.append(v.to_dict()) + _dict['intents'] = intents_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in 
self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'suggestion_id') and self.suggestion_id is not None: + _dict['suggestion_id'] = self.suggestion_id + if hasattr(self, 'attachments') and self.attachments is not None: + attachments_list = [] + for v in self.attachments: + if isinstance(v, dict): + attachments_list.append(v) + else: + attachments_list.append(v.to_dict()) + _dict['attachments'] = attachments_list + if hasattr(self, 'analytics') and self.analytics is not None: + if isinstance(self.analytics, dict): + _dict['analytics'] = self.analytics + else: + _dict['analytics'] = self.analytics.to_dict() + if hasattr(self, 'options') and self.options is not None: + if isinstance(self.options, dict): + _dict['options'] = self.options + else: + _dict['options'] = self.options.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatelessMessageInput object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatelessMessageInput') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatelessMessageInput') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class MessageTypeEnum(str, Enum): + """ + The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or action skill is + bypassed.) + **Note:** A `search` message results in an error if no search skill is configured + for the assistant. + """ + + TEXT = 'text' + SEARCH = 'search' + + +class StatelessMessageInputOptions: + """ + Optional properties that control how the assistant responds. + + :param bool restart: (optional) Whether to restart dialog processing at the root + of the dialog, regardless of any previously visited nodes. **Note:** This does + not affect `turn_count` or any other context variables. + :param bool alternate_intents: (optional) Whether to return more than one + intent. Set to `true` to return all matching intents. + :param bool async_callout: (optional) Whether custom extension callouts are + executed asynchronously. Asynchronous execution means the response to the + extension callout will be processed on the subsequent message call, the initial + message response signals to the client that the operation may be long running. + With synchronous execution the custom extension is executed and returns the + response in a single message turn. **Note:** **async_callout** defaults to true + for API versions earlier than 2023-06-15. + :param MessageInputOptionsSpelling spelling: (optional) Spelling correction + options for the message. Any options specified on an individual message override + the settings configured for the skill. + :param bool debug: (optional) Whether to return additional diagnostic + information. Set to `true` to return additional information in the + `output.debug` property. 
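# Editor's illustrative sketch (not part of the generated module): building a
# stateless text turn. MessageTypeEnum documents the accepted message_type
# values ('text' and 'search'). The import path and the utterance are
# assumptions.
from ibm_watson.assistant_v2 import StatelessMessageInput

user_turn = StatelessMessageInput(
    message_type=StatelessMessageInput.MessageTypeEnum.TEXT.value,
    text='What are your opening hours?',  # hypothetical utterance
)
# Unset optional fields are omitted from the serialized payload.
assert user_turn.to_dict() == {
    'message_type': 'text',
    'text': 'What are your opening hours?',
}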
+ """ + + def __init__( + self, + *, + restart: Optional[bool] = None, + alternate_intents: Optional[bool] = None, + async_callout: Optional[bool] = None, + spelling: Optional['MessageInputOptionsSpelling'] = None, + debug: Optional[bool] = None, + ) -> None: + """ + Initialize a StatelessMessageInputOptions object. + + :param bool restart: (optional) Whether to restart dialog processing at the + root of the dialog, regardless of any previously visited nodes. **Note:** + This does not affect `turn_count` or any other context variables. + :param bool alternate_intents: (optional) Whether to return more than one + intent. Set to `true` to return all matching intents. + :param bool async_callout: (optional) Whether custom extension callouts are + executed asynchronously. Asynchronous execution means the response to the + extension callout will be processed on the subsequent message call, the + initial message response signals to the client that the operation may be + long running. With synchronous execution the custom extension is executed + and returns the response in a single message turn. **Note:** + **async_callout** defaults to true for API versions earlier than + 2023-06-15. + :param MessageInputOptionsSpelling spelling: (optional) Spelling correction + options for the message. Any options specified on an individual message + override the settings configured for the skill. + :param bool debug: (optional) Whether to return additional diagnostic + information. Set to `true` to return additional information in the + `output.debug` property. + """ + self.restart = restart + self.alternate_intents = alternate_intents + self.async_callout = async_callout + self.spelling = spelling + self.debug = debug + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatelessMessageInputOptions': + """Initialize a StatelessMessageInputOptions object from a json dictionary.""" + args = {} + if (restart := _dict.get('restart')) is not None: + args['restart'] = restart + if (alternate_intents := _dict.get('alternate_intents')) is not None: + args['alternate_intents'] = alternate_intents + if (async_callout := _dict.get('async_callout')) is not None: + args['async_callout'] = async_callout + if (spelling := _dict.get('spelling')) is not None: + args['spelling'] = MessageInputOptionsSpelling.from_dict(spelling) + if (debug := _dict.get('debug')) is not None: + args['debug'] = debug + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessMessageInputOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'restart') and self.restart is not None: + _dict['restart'] = self.restart + if hasattr(self, + 'alternate_intents') and self.alternate_intents is not None: + _dict['alternate_intents'] = self.alternate_intents + if hasattr(self, 'async_callout') and self.async_callout is not None: + _dict['async_callout'] = self.async_callout + if hasattr(self, 'spelling') and self.spelling is not None: + if isinstance(self.spelling, dict): + _dict['spelling'] = self.spelling + else: + _dict['spelling'] = self.spelling.to_dict() + if hasattr(self, 'debug') and self.debug is not None: + _dict['debug'] = self.debug + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatelessMessageInputOptions object.""" + return 
json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatelessMessageInputOptions') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatelessMessageInputOptions') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class StatelessMessageResponse: + """ + A stateless response from the watsonx Assistant service. + + :param MessageOutput output: Assistant output to be rendered or processed by the + client. + :param StatelessMessageContext context: Context data for the conversation. You + can use this property to access context variables. The context is not stored by + the assistant; to maintain session state, include the context from the response + in the next message. + :param MessageOutput masked_output: (optional) Assistant output to be rendered + or processed by the client. All private data is masked or removed. + :param MessageInput masked_input: (optional) An input object that includes the + input text. All private data is masked or removed. + :param str user_id: (optional) A string value that identifies the user who is + interacting with the assistant. The client must provide a unique identifier for + each individual end user who accesses the application. For user-based plans, + this user ID is used to identify unique users for billing purposes. This string + cannot contain carriage return, newline, or tab characters. If no value is + specified in the input, **user_id** is automatically set to the value of + **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the global + system context. + """ + + def __init__( + self, + output: 'MessageOutput', + context: 'StatelessMessageContext', + *, + masked_output: Optional['MessageOutput'] = None, + masked_input: Optional['MessageInput'] = None, + user_id: Optional[str] = None, + ) -> None: + """ + Initialize a StatelessMessageResponse object. + + :param MessageOutput output: Assistant output to be rendered or processed + by the client. + :param StatelessMessageContext context: Context data for the conversation. + You can use this property to access context variables. The context is not + stored by the assistant; to maintain session state, include the context + from the response in the next message. + :param MessageOutput masked_output: (optional) Assistant output to be + rendered or processed by the client. All private data is masked or removed. + :param MessageInput masked_input: (optional) An input object that includes + the input text. All private data is masked or removed. + :param str user_id: (optional) A string value that identifies the user who + is interacting with the assistant. The client must provide a unique + identifier for each individual end user who accesses the application. For + user-based plans, this user ID is used to identify unique users for billing + purposes. This string cannot contain carriage return, newline, or tab + characters. If no value is specified in the input, **user_id** is + automatically set to the value of **context.global.session_id**. + **Note:** This property is the same as the **user_id** property in the + global system context. 
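# Editor's illustrative sketch (not part of the generated module): per-message
# options such as debug output and alternate intents ride along on the input
# object rather than on the request itself. Import path and sample values are
# assumptions.
from ibm_watson.assistant_v2 import (
    StatelessMessageInput,
    StatelessMessageInputOptions,
)

options = StatelessMessageInputOptions(debug=True, alternate_intents=True)
turn = StatelessMessageInput(text='Where is my order?', options=options)
assert turn.to_dict()['options'] == {'alternate_intents': True, 'debug': True}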
+ """ + self.output = output + self.context = context + self.masked_output = masked_output + self.masked_input = masked_input + self.user_id = user_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatelessMessageResponse': + """Initialize a StatelessMessageResponse object from a json dictionary.""" + args = {} + if (output := _dict.get('output')) is not None: + args['output'] = MessageOutput.from_dict(output) + else: + raise ValueError( + 'Required property \'output\' not present in StatelessMessageResponse JSON' + ) + if (context := _dict.get('context')) is not None: + args['context'] = StatelessMessageContext.from_dict(context) + else: + raise ValueError( + 'Required property \'context\' not present in StatelessMessageResponse JSON' + ) + if (masked_output := _dict.get('masked_output')) is not None: + args['masked_output'] = MessageOutput.from_dict(masked_output) + if (masked_input := _dict.get('masked_input')) is not None: + args['masked_input'] = MessageInput.from_dict(masked_input) + if (user_id := _dict.get('user_id')) is not None: + args['user_id'] = user_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessMessageResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'output') and self.output is not None: + if isinstance(self.output, dict): + _dict['output'] = self.output + else: + _dict['output'] = self.output.to_dict() + if hasattr(self, 'context') and self.context is not None: + if isinstance(self.context, dict): + _dict['context'] = self.context + else: + _dict['context'] = self.context.to_dict() + if hasattr(self, 'masked_output') and self.masked_output is not None: + if isinstance(self.masked_output, dict): + _dict['masked_output'] = self.masked_output + else: + _dict['masked_output'] = self.masked_output.to_dict() + if hasattr(self, 'masked_input') and self.masked_input is not None: + if isinstance(self.masked_input, dict): + _dict['masked_input'] = self.masked_input + else: + _dict['masked_input'] = self.masked_input.to_dict() + if hasattr(self, 'user_id') and self.user_id is not None: + _dict['user_id'] = self.user_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatelessMessageResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatelessMessageResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatelessMessageResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class StatelessMessageStreamResponse: + """ + A stateless streamed response form the watsonx Assistant service. + + """ + + def __init__(self,) -> None: + """ + Initialize a StatelessMessageStreamResponse object. + + """ + msg = "Cannot instantiate base class. 
Instead, instantiate one of the defined subclasses: {0}".format( + ", ".join([ + 'StatelessMessageStreamResponseMessageStreamPartialItem', + 'StatelessMessageStreamResponseMessageStreamCompleteItem', + 'StatelessMessageStreamResponseStatelessMessageStreamFinalResponse' + ])) + raise Exception(msg) + + +class StatusError: + """ + An object describing an error that occurred during processing of an asynchronous + operation. + + :param str message: (optional) The text of the error message. + """ + + def __init__( + self, + *, + message: Optional[str] = None, + ) -> None: + """ + Initialize a StatusError object. + + :param str message: (optional) The text of the error message. + """ + self.message = message + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatusError': + """Initialize a StatusError object from a json dictionary.""" + args = {} + if (message := _dict.get('message')) is not None: + args['message'] = message + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatusError object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'message') and self.message is not None: + _dict['message'] = self.message + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatusError object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatusError') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatusError') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TurnEventActionSource: + """ + TurnEventActionSource. + + :param str type: (optional) The type of turn event. + :param str action: (optional) An action that was visited during processing of + the message. + :param str action_title: (optional) The title of the action. + :param str condition: (optional) The condition that triggered the dialog node. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + action: Optional[str] = None, + action_title: Optional[str] = None, + condition: Optional[str] = None, + ) -> None: + """ + Initialize a TurnEventActionSource object. + + :param str type: (optional) The type of turn event. + :param str action: (optional) An action that was visited during processing + of the message. + :param str action_title: (optional) The title of the action. + :param str condition: (optional) The condition that triggered the dialog + node. 
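# Editor's illustrative sketch (not part of the generated module): unlike most
# models in this file, StatelessMessageResponse.from_dict treats 'output' and
# 'context' as required and raises ValueError when either is missing. The
# import path is an assumption.
from ibm_watson.assistant_v2 import StatelessMessageResponse

try:
    StatelessMessageResponse.from_dict({})  # neither required property present
except ValueError as err:
    print(err)  # Required property 'output' not present in StatelessMessageResponse JSON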
+ """ + self.type = type + self.action = action + self.action_title = action_title + self.condition = condition + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TurnEventActionSource': + """Initialize a TurnEventActionSource object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (action := _dict.get('action')) is not None: + args['action'] = action + if (action_title := _dict.get('action_title')) is not None: + args['action_title'] = action_title + if (condition := _dict.get('condition')) is not None: + args['condition'] = condition + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventActionSource object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'action') and self.action is not None: + _dict['action'] = self.action + if hasattr(self, 'action_title') and self.action_title is not None: + _dict['action_title'] = self.action_title + if hasattr(self, 'condition') and self.condition is not None: + _dict['condition'] = self.condition + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventActionSource object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventActionSource') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventActionSource') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of turn event. + """ + + ACTION = 'action' + + +class TurnEventCalloutCallout: + """ + TurnEventCalloutCallout. + + :param str type: (optional) The type of callout. Currently, the only supported + value is `integration_interaction` (for calls to extensions). + :param dict internal: (optional) For internal use only. + :param str result_variable: (optional) The name of the variable where the + callout result is stored. + :param TurnEventCalloutCalloutRequest request: (optional) The request object + executed to the external server specified by the extension. + :param TurnEventCalloutCalloutResponse response: (optional) The response object + received by the external server made by the extension. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + internal: Optional[dict] = None, + result_variable: Optional[str] = None, + request: Optional['TurnEventCalloutCalloutRequest'] = None, + response: Optional['TurnEventCalloutCalloutResponse'] = None, + ) -> None: + """ + Initialize a TurnEventCalloutCallout object. + + :param str type: (optional) The type of callout. Currently, the only + supported value is `integration_interaction` (for calls to extensions). + :param dict internal: (optional) For internal use only. + :param str result_variable: (optional) The name of the variable where the + callout result is stored. + :param TurnEventCalloutCalloutRequest request: (optional) The request + object executed to the external server specified by the extension. 
+ :param TurnEventCalloutCalloutResponse response: (optional) The response + object received by the external server made by the extension. + """ + self.type = type + self.internal = internal + self.result_variable = result_variable + self.request = request + self.response = response + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TurnEventCalloutCallout': + """Initialize a TurnEventCalloutCallout object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (internal := _dict.get('internal')) is not None: + args['internal'] = internal + if (result_variable := _dict.get('result_variable')) is not None: + args['result_variable'] = result_variable + if (request := _dict.get('request')) is not None: + args['request'] = TurnEventCalloutCalloutRequest.from_dict(request) + if (response := _dict.get('response')) is not None: + args['response'] = TurnEventCalloutCalloutResponse.from_dict( + response) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventCalloutCallout object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'internal') and self.internal is not None: + _dict['internal'] = self.internal + if hasattr(self, + 'result_variable') and self.result_variable is not None: + _dict['result_variable'] = self.result_variable + if hasattr(self, 'request') and self.request is not None: + if isinstance(self.request, dict): + _dict['request'] = self.request + else: + _dict['request'] = self.request.to_dict() + if hasattr(self, 'response') and self.response is not None: + if isinstance(self.response, dict): + _dict['response'] = self.response + else: + _dict['response'] = self.response.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventCalloutCallout object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventCalloutCallout') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventCalloutCallout') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of callout. Currently, the only supported value is + `integration_interaction` (for calls to extensions). + """ + + INTEGRATION_INTERACTION = 'integration_interaction' + + +class TurnEventCalloutCalloutRequest: + """ + TurnEventCalloutCalloutRequest. + + :param str method: (optional) The REST method of the request. + :param str url: (optional) The host URL of the request call. + :param str path: (optional) The URL path of the request call. + :param str query_parameters: (optional) Any query parameters appended to the URL + of the request call. + :param dict headers_: (optional) Any headers included in the request call. + :param dict body: (optional) Contains the response of the external server or an + object. In cases like timeouts or connections errors, it will contain details of + why the callout to the external server failed. 
+ """ + + def __init__( + self, + *, + method: Optional[str] = None, + url: Optional[str] = None, + path: Optional[str] = None, + query_parameters: Optional[str] = None, + headers_: Optional[dict] = None, + body: Optional[dict] = None, + ) -> None: + """ + Initialize a TurnEventCalloutCalloutRequest object. + + :param str method: (optional) The REST method of the request. + :param str url: (optional) The host URL of the request call. + :param str path: (optional) The URL path of the request call. + :param str query_parameters: (optional) Any query parameters appended to + the URL of the request call. + :param dict headers_: (optional) Any headers included in the request call. + :param dict body: (optional) Contains the response of the external server + or an object. In cases like timeouts or connections errors, it will contain + details of why the callout to the external server failed. + """ + self.method = method + self.url = url + self.path = path + self.query_parameters = query_parameters + self.headers_ = headers_ + self.body = body + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TurnEventCalloutCalloutRequest': + """Initialize a TurnEventCalloutCalloutRequest object from a json dictionary.""" + args = {} + if (method := _dict.get('method')) is not None: + args['method'] = method + if (url := _dict.get('url')) is not None: + args['url'] = url + if (path := _dict.get('path')) is not None: + args['path'] = path + if (query_parameters := _dict.get('query_parameters')) is not None: + args['query_parameters'] = query_parameters + if (headers_ := _dict.get('headers')) is not None: + args['headers_'] = headers_ + if (body := _dict.get('body')) is not None: + args['body'] = body + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventCalloutCalloutRequest object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'method') and self.method is not None: + _dict['method'] = self.method + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + if hasattr(self, 'path') and self.path is not None: + _dict['path'] = self.path + if hasattr(self, + 'query_parameters') and self.query_parameters is not None: + _dict['query_parameters'] = self.query_parameters + if hasattr(self, 'headers_') and self.headers_ is not None: + _dict['headers'] = self.headers_ + if hasattr(self, 'body') and self.body is not None: + _dict['body'] = self.body + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventCalloutCalloutRequest object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventCalloutCalloutRequest') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventCalloutCalloutRequest') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class MethodEnum(str, Enum): + """ + The REST method of the request. + """ + + GET = 'get' + POST = 'post' + PUT = 'put' + DELETE = 'delete' + PATCH = 'patch' + + +class TurnEventCalloutCalloutResponse: + """ + TurnEventCalloutCalloutResponse. + + :param str body: (optional) The final response string. 
This response is a + composition of every partial chunk received from the stream. + :param int status_code: (optional) The final status code of the response. + :param dict last_event: (optional) The response from the last chunk received + from the response stream. + """ + + def __init__( + self, + *, + body: Optional[str] = None, + status_code: Optional[int] = None, + last_event: Optional[dict] = None, + ) -> None: + """ + Initialize a TurnEventCalloutCalloutResponse object. + + :param str body: (optional) The final response string. This response is a + composition of every partial chunk received from the stream. + :param int status_code: (optional) The final status code of the response. + :param dict last_event: (optional) The response from the last chunk + received from the response stream. + """ + self.body = body + self.status_code = status_code + self.last_event = last_event + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TurnEventCalloutCalloutResponse': + """Initialize a TurnEventCalloutCalloutResponse object from a json dictionary.""" + args = {} + if (body := _dict.get('body')) is not None: + args['body'] = body + if (status_code := _dict.get('status_code')) is not None: + args['status_code'] = status_code + if (last_event := _dict.get('last_event')) is not None: + args['last_event'] = last_event + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventCalloutCalloutResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'body') and self.body is not None: + _dict['body'] = self.body + if hasattr(self, 'status_code') and self.status_code is not None: + _dict['status_code'] = self.status_code + if hasattr(self, 'last_event') and self.last_event is not None: + _dict['last_event'] = self.last_event + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventCalloutCalloutResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventCalloutCalloutResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventCalloutCalloutResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TurnEventCalloutError: + """ + TurnEventCalloutError. + + :param str message: (optional) Any error message returned by a failed call to an + external service. + """ + + def __init__( + self, + *, + message: Optional[str] = None, + ) -> None: + """ + Initialize a TurnEventCalloutError object. + + :param str message: (optional) Any error message returned by a failed call + to an external service. 
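# Editor's illustrative sketch (not part of the generated module): in the
# callout request model above, the 'headers' JSON field is exposed as the
# 'headers_' attribute (trailing underscore); from_dict()/to_dict() translate
# between the two. Import path and sample values are assumptions.
from ibm_watson.assistant_v2 import TurnEventCalloutCalloutRequest

request = TurnEventCalloutCalloutRequest.from_dict({
    'method': 'post',                          # MethodEnum uses lowercase verbs for this model
    'url': 'https://extension.example.com',    # hypothetical extension host
    'path': '/orders',
    'headers': {'Content-Type': 'application/json'},
})
assert request.headers_ == {'Content-Type': 'application/json'}
assert request.to_dict()['headers'] == {'Content-Type': 'application/json'}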
+ """ + self.message = message + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TurnEventCalloutError': + """Initialize a TurnEventCalloutError object from a json dictionary.""" + args = {} + if (message := _dict.get('message')) is not None: + args['message'] = message + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventCalloutError object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'message') and self.message is not None: + _dict['message'] = self.message + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventCalloutError object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventCalloutError') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventCalloutError') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TurnEventGenerativeAICalledCallout: + """ + TurnEventGenerativeAICalledCallout. + + :param bool search_called: (optional) Whether the document search engine was + called. + :param bool llm_called: (optional) Whether watsonx.ai was called during answer + generation. + :param TurnEventGenerativeAICalledCalloutSearch search: (optional) + :param TurnEventGenerativeAICalledCalloutLlm llm: (optional) + :param str idk_reason_code: (optional) Indicates why a conversational search + response resolved to an idk response. This field will only be available when the + conversational search response is an idk response. + """ + + def __init__( + self, + *, + search_called: Optional[bool] = None, + llm_called: Optional[bool] = None, + search: Optional['TurnEventGenerativeAICalledCalloutSearch'] = None, + llm: Optional['TurnEventGenerativeAICalledCalloutLlm'] = None, + idk_reason_code: Optional[str] = None, + ) -> None: + """ + Initialize a TurnEventGenerativeAICalledCallout object. + + :param bool search_called: (optional) Whether the document search engine + was called. + :param bool llm_called: (optional) Whether watsonx.ai was called during + answer generation. + :param TurnEventGenerativeAICalledCalloutSearch search: (optional) + :param TurnEventGenerativeAICalledCalloutLlm llm: (optional) + :param str idk_reason_code: (optional) Indicates why a conversational + search response resolved to an idk response. This field will only be + available when the conversational search response is an idk response. 
+ """ + self.search_called = search_called + self.llm_called = llm_called + self.search = search + self.llm = llm + self.idk_reason_code = idk_reason_code + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TurnEventGenerativeAICalledCallout': + """Initialize a TurnEventGenerativeAICalledCallout object from a json dictionary.""" + args = {} + if (search_called := _dict.get('search_called')) is not None: + args['search_called'] = search_called + if (llm_called := _dict.get('llm_called')) is not None: + args['llm_called'] = llm_called + if (search := _dict.get('search')) is not None: + args['search'] = TurnEventGenerativeAICalledCalloutSearch.from_dict( + search) + if (llm := _dict.get('llm')) is not None: + args['llm'] = TurnEventGenerativeAICalledCalloutLlm.from_dict(llm) + if (idk_reason_code := _dict.get('idk_reason_code')) is not None: + args['idk_reason_code'] = idk_reason_code + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventGenerativeAICalledCallout object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'search_called') and self.search_called is not None: + _dict['search_called'] = self.search_called + if hasattr(self, 'llm_called') and self.llm_called is not None: + _dict['llm_called'] = self.llm_called + if hasattr(self, 'search') and self.search is not None: + if isinstance(self.search, dict): + _dict['search'] = self.search + else: + _dict['search'] = self.search.to_dict() + if hasattr(self, 'llm') and self.llm is not None: + if isinstance(self.llm, dict): + _dict['llm'] = self.llm + else: + _dict['llm'] = self.llm.to_dict() + if hasattr(self, + 'idk_reason_code') and self.idk_reason_code is not None: + _dict['idk_reason_code'] = self.idk_reason_code + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventGenerativeAICalledCallout object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventGenerativeAICalledCallout') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventGenerativeAICalledCallout') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TurnEventGenerativeAICalledCalloutLlm: + """ + TurnEventGenerativeAICalledCalloutLlm. + + :param str type: (optional) The name of the LLM engine called by the system. + :param str model_id: (optional) The LLM model used to generate the response. + :param str model_class_id: (optional) The watsonx.ai class ID that was used + during the answer generation request to the LLM. This is only included when a + request to the LLM has been made by the system. + :param int generated_token_count: (optional) The number of tokens that were + generated in the response by the LLM. This is only included when a request to + the LLM was successful and a response was generated. + :param int input_token_count: (optional) The number of tokens that were sent to + the LLM during answer generation. This is only included when a request to the + LLM has been made by the system. + :param bool success: (optional) Whether the answer generation request to the LLM + was successful. 
+    :param TurnEventGenerativeAICalledCalloutLlmResponse response: (optional)
+    :param List[SearchResults] request: (optional) An array of objects containing the
+          search results.
+    """
+
+    def __init__(
+        self,
+        *,
+        type: Optional[str] = None,
+        model_id: Optional[str] = None,
+        model_class_id: Optional[str] = None,
+        generated_token_count: Optional[int] = None,
+        input_token_count: Optional[int] = None,
+        success: Optional[bool] = None,
+        response: Optional[
+            'TurnEventGenerativeAICalledCalloutLlmResponse'] = None,
+        request: Optional[List['SearchResults']] = None,
+    ) -> None:
+        """
+        Initialize a TurnEventGenerativeAICalledCalloutLlm object.
+
+        :param str type: (optional) The name of the LLM engine called by the
+               system.
+        :param str model_id: (optional) The LLM model used to generate the
+               response.
+        :param str model_class_id: (optional) The watsonx.ai class ID that was used
+               during the answer generation request to the LLM. This is only included when
+               a request to the LLM has been made by the system.
+        :param int generated_token_count: (optional) The number of tokens that were
+               generated in the response by the LLM. This is only included when a request
+               to the LLM was successful and a response was generated.
+        :param int input_token_count: (optional) The number of tokens that were
+               sent to the LLM during answer generation. This is only included when a
+               request to the LLM has been made by the system.
+        :param bool success: (optional) Whether the answer generation request to
+               the LLM was successful.
+        :param TurnEventGenerativeAICalledCalloutLlmResponse response: (optional)
+        :param List[SearchResults] request: (optional) An array of objects
+               containing the search results.
+        """
+        self.type = type
+        self.model_id = model_id
+        self.model_class_id = model_class_id
+        self.generated_token_count = generated_token_count
+        self.input_token_count = input_token_count
+        self.success = success
+        self.response = response
+        self.request = request
+
+    @classmethod
+    def from_dict(cls, _dict: Dict) -> 'TurnEventGenerativeAICalledCalloutLlm':
+        """Initialize a TurnEventGenerativeAICalledCalloutLlm object from a json dictionary."""
+        args = {}
+        if (type := _dict.get('type')) is not None:
+            args['type'] = type
+        if (model_id := _dict.get('model_id')) is not None:
+            args['model_id'] = model_id
+        if (model_class_id := _dict.get('model_class_id')) is not None:
+            args['model_class_id'] = model_class_id
+        if (generated_token_count :=
+                _dict.get('generated_token_count')) is not None:
+            args['generated_token_count'] = generated_token_count
+        if (input_token_count := _dict.get('input_token_count')) is not None:
+            args['input_token_count'] = input_token_count
+        if (success := _dict.get('success')) is not None:
+            args['success'] = success
+        if (response := _dict.get('response')) is not None:
+            args[
+                'response'] = TurnEventGenerativeAICalledCalloutLlmResponse.from_dict(
+                    response)
+        if (request := _dict.get('request')) is not None:
+            args['request'] = [SearchResults.from_dict(v) for v in request]
+        return cls(**args)
+
+    @classmethod
+    def _from_dict(cls, _dict):
+        """Initialize a TurnEventGenerativeAICalledCalloutLlm object from a json dictionary."""
+        return cls.from_dict(_dict)
+
+    def to_dict(self) -> Dict:
+        """Return a json dictionary representing this model."""
+        _dict = {}
+        if hasattr(self, 'type') and self.type is not None:
+            _dict['type'] = self.type
+        if hasattr(self, 'model_id') and self.model_id is not None:
+            _dict['model_id'] = self.model_id
+        if hasattr(self, 'model_class_id') and 
self.model_class_id is not None: + _dict['model_class_id'] = self.model_class_id + if hasattr(self, 'generated_token_count' + ) and self.generated_token_count is not None: + _dict['generated_token_count'] = self.generated_token_count + if hasattr(self, + 'input_token_count') and self.input_token_count is not None: + _dict['input_token_count'] = self.input_token_count + if hasattr(self, 'success') and self.success is not None: + _dict['success'] = self.success + if hasattr(self, 'response') and self.response is not None: + if isinstance(self.response, dict): + _dict['response'] = self.response + else: + _dict['response'] = self.response.to_dict() + if hasattr(self, 'request') and self.request is not None: + request_list = [] + for v in self.request: + if isinstance(v, dict): + request_list.append(v) + else: + request_list.append(v.to_dict()) + _dict['request'] = request_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventGenerativeAICalledCalloutLlm object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventGenerativeAICalledCalloutLlm') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventGenerativeAICalledCalloutLlm') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TurnEventGenerativeAICalledCalloutLlmResponse: + """ + + + :param str text: (optional) The LLM response that is returned. + :param str response_type: (optional) The type of response that is returned. + :param bool is_idk_response: (optional) Whether the response is an idk response. + """ + + def __init__( + self, + *, + text: Optional[str] = None, + response_type: Optional[str] = None, + is_idk_response: Optional[bool] = None, + ) -> None: + """ + Initialize a TurnEventGenerativeAICalledCalloutLlmResponse object. + + :param str text: (optional) The LLM response that is returned. + :param str response_type: (optional) The type of response that is returned. + :param bool is_idk_response: (optional) Whether the response is an idk + response. 
+ """ + self.text = text + self.response_type = response_type + self.is_idk_response = is_idk_response + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'TurnEventGenerativeAICalledCalloutLlmResponse': + """Initialize a TurnEventGenerativeAICalledCalloutLlmResponse object from a json dictionary.""" + args = {} + if (text := _dict.get('text')) is not None: + args['text'] = text + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + if (is_idk_response := _dict.get('is_idk_response')) is not None: + args['is_idk_response'] = is_idk_response + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventGenerativeAICalledCalloutLlmResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, + 'is_idk_response') and self.is_idk_response is not None: + _dict['is_idk_response'] = self.is_idk_response + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventGenerativeAICalledCalloutLlmResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'TurnEventGenerativeAICalledCalloutLlmResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'TurnEventGenerativeAICalledCalloutLlmResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TurnEventGenerativeAICalledCalloutRequest: + """ + TurnEventGenerativeAICalledCalloutRequest. + + :param str method: (optional) The REST method of the request. + :param str url: (optional) The host URL of the request call. + :param str port: (optional) The host port of the request call. + :param str path: (optional) The URL path of the request call. + :param str query_parameters: (optional) Any query parameters appended to the URL + of the request call. + :param dict headers_: (optional) Any headers included in the request call. + :param dict body: (optional) Contains the response of the external server or an + object. In cases like timeouts or connections errors, it will contain details of + why the callout to the external server failed. + """ + + def __init__( + self, + *, + method: Optional[str] = None, + url: Optional[str] = None, + port: Optional[str] = None, + path: Optional[str] = None, + query_parameters: Optional[str] = None, + headers_: Optional[dict] = None, + body: Optional[dict] = None, + ) -> None: + """ + Initialize a TurnEventGenerativeAICalledCalloutRequest object. + + :param str method: (optional) The REST method of the request. + :param str url: (optional) The host URL of the request call. + :param str port: (optional) The host port of the request call. + :param str path: (optional) The URL path of the request call. + :param str query_parameters: (optional) Any query parameters appended to + the URL of the request call. + :param dict headers_: (optional) Any headers included in the request call. 
+ :param dict body: (optional) Contains the response of the external server + or an object. In cases like timeouts or connections errors, it will contain + details of why the callout to the external server failed. + """ + self.method = method + self.url = url + self.port = port + self.path = path + self.query_parameters = query_parameters + self.headers_ = headers_ + self.body = body + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'TurnEventGenerativeAICalledCalloutRequest': + """Initialize a TurnEventGenerativeAICalledCalloutRequest object from a json dictionary.""" + args = {} + if (method := _dict.get('method')) is not None: + args['method'] = method + if (url := _dict.get('url')) is not None: + args['url'] = url + if (port := _dict.get('port')) is not None: + args['port'] = port + if (path := _dict.get('path')) is not None: + args['path'] = path + if (query_parameters := _dict.get('query_parameters')) is not None: + args['query_parameters'] = query_parameters + if (headers_ := _dict.get('headers')) is not None: + args['headers_'] = headers_ + if (body := _dict.get('body')) is not None: + args['body'] = body + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventGenerativeAICalledCalloutRequest object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'method') and self.method is not None: + _dict['method'] = self.method + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + if hasattr(self, 'port') and self.port is not None: + _dict['port'] = self.port + if hasattr(self, 'path') and self.path is not None: + _dict['path'] = self.path + if hasattr(self, + 'query_parameters') and self.query_parameters is not None: + _dict['query_parameters'] = self.query_parameters + if hasattr(self, 'headers_') and self.headers_ is not None: + _dict['headers'] = self.headers_ + if hasattr(self, 'body') and self.body is not None: + _dict['body'] = self.body + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventGenerativeAICalledCalloutRequest object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'TurnEventGenerativeAICalledCalloutRequest') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'TurnEventGenerativeAICalledCalloutRequest') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class MethodEnum(str, Enum): + """ + The REST method of the request. + """ + + GET = 'GET' + POST = 'POST' + PUT = 'PUT' + DELETE = 'DELETE' + PATCH = 'PATCH' + + +class TurnEventGenerativeAICalledCalloutResponse: + """ + TurnEventGenerativeAICalledCalloutResponse. + + :param str body: (optional) The final response string. This response is a + composition of every partial chunk received from the stream. + :param int status_code: (optional) The final status code of the response. + """ + + def __init__( + self, + *, + body: Optional[str] = None, + status_code: Optional[int] = None, + ) -> None: + """ + Initialize a TurnEventGenerativeAICalledCalloutResponse object. + + :param str body: (optional) The final response string. 
This response is a + composition of every partial chunk received from the stream. + :param int status_code: (optional) The final status code of the response. + """ + self.body = body + self.status_code = status_code + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'TurnEventGenerativeAICalledCalloutResponse': + """Initialize a TurnEventGenerativeAICalledCalloutResponse object from a json dictionary.""" + args = {} + if (body := _dict.get('body')) is not None: + args['body'] = body + if (status_code := _dict.get('status_code')) is not None: + args['status_code'] = status_code + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventGenerativeAICalledCalloutResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'body') and self.body is not None: + _dict['body'] = self.body + if hasattr(self, 'status_code') and self.status_code is not None: + _dict['status_code'] = self.status_code + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventGenerativeAICalledCalloutResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'TurnEventGenerativeAICalledCalloutResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'TurnEventGenerativeAICalledCalloutResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TurnEventGenerativeAICalledCalloutSearch: + """ + TurnEventGenerativeAICalledCalloutSearch. + + :param str engine: (optional) The search engine that was used to scan the + documents. + :param str index: (optional) The name of the Elasticsearch index being used. + This field is only available if the engine being used is Elasticsearch. + :param str query: (optional) The query that will be used by the system to + initiate search on the document search engine. + :param TurnEventGenerativeAICalledCalloutRequest request: (optional) + :param TurnEventGenerativeAICalledCalloutResponse response: (optional) + """ + + def __init__( + self, + *, + engine: Optional[str] = None, + index: Optional[str] = None, + query: Optional[str] = None, + request: Optional['TurnEventGenerativeAICalledCalloutRequest'] = None, + response: Optional['TurnEventGenerativeAICalledCalloutResponse'] = None, + ) -> None: + """ + Initialize a TurnEventGenerativeAICalledCalloutSearch object. + + :param str engine: (optional) The search engine that was used to scan the + documents. + :param str index: (optional) The name of the Elasticsearch index being + used. This field is only available if the engine being used is + Elasticsearch. + :param str query: (optional) The query that will be used by the system to + initiate search on the document search engine. 
+ :param TurnEventGenerativeAICalledCalloutRequest request: (optional) + :param TurnEventGenerativeAICalledCalloutResponse response: (optional) + """ + self.engine = engine + self.index = index + self.query = query + self.request = request + self.response = response + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'TurnEventGenerativeAICalledCalloutSearch': + """Initialize a TurnEventGenerativeAICalledCalloutSearch object from a json dictionary.""" + args = {} + if (engine := _dict.get('engine')) is not None: + args['engine'] = engine + if (index := _dict.get('index')) is not None: + args['index'] = index + if (query := _dict.get('query')) is not None: + args['query'] = query + if (request := _dict.get('request')) is not None: + args[ + 'request'] = TurnEventGenerativeAICalledCalloutRequest.from_dict( + request) + if (response := _dict.get('response')) is not None: + args[ + 'response'] = TurnEventGenerativeAICalledCalloutResponse.from_dict( + response) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventGenerativeAICalledCalloutSearch object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'engine') and self.engine is not None: + _dict['engine'] = self.engine + if hasattr(self, 'index') and self.index is not None: + _dict['index'] = self.index + if hasattr(self, 'query') and self.query is not None: + _dict['query'] = self.query + if hasattr(self, 'request') and self.request is not None: + if isinstance(self.request, dict): + _dict['request'] = self.request + else: + _dict['request'] = self.request.to_dict() + if hasattr(self, 'response') and self.response is not None: + if isinstance(self.response, dict): + _dict['response'] = self.response + else: + _dict['response'] = self.response.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventGenerativeAICalledCalloutSearch object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventGenerativeAICalledCalloutSearch') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventGenerativeAICalledCalloutSearch') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TurnEventGenerativeAICalledMetrics: + """ + TurnEventGenerativeAICalledMetrics. + + :param float search_time_ms: (optional) The amount of time (in milliseconds) it + took for the system to complete the search using the document search engine. + :param float answer_generation_time_ms: (optional) The amount of time (in + milliseconds) it took for the system to complete answer generation process by + reaching out to watsonx.ai. + :param float total_time_ms: (optional) The amount of time (in milliseconds) it + took for the system to fully process the conversational search. + """ + + def __init__( + self, + *, + search_time_ms: Optional[float] = None, + answer_generation_time_ms: Optional[float] = None, + total_time_ms: Optional[float] = None, + ) -> None: + """ + Initialize a TurnEventGenerativeAICalledMetrics object. 
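# --- Editor's note: illustrative usage sketch; not part of the generated diff. ---
# All three timing fields on TurnEventGenerativeAICalledMetrics are optional
# floats, so guard against None before deriving anything from them. The numbers
# below are invented.
_metrics = TurnEventGenerativeAICalledMetrics.from_dict({
    'search_time_ms': 120.0,
    'answer_generation_time_ms': 830.0,
    'total_time_ms': 1000.0,
})
if None not in (_metrics.search_time_ms, _metrics.answer_generation_time_ms,
                _metrics.total_time_ms):
    # Time spent outside of search and answer generation.
    _overhead_ms = (_metrics.total_time_ms - _metrics.search_time_ms
                    - _metrics.answer_generation_time_ms)
assert _metrics == TurnEventGenerativeAICalledMetrics.from_dict(_metrics.to_dict())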
+ + :param float search_time_ms: (optional) The amount of time (in + milliseconds) it took for the system to complete the search using the + document search engine. + :param float answer_generation_time_ms: (optional) The amount of time (in + milliseconds) it took for the system to complete answer generation process + by reaching out to watsonx.ai. + :param float total_time_ms: (optional) The amount of time (in milliseconds) + it took for the system to fully process the conversational search. + """ + self.search_time_ms = search_time_ms + self.answer_generation_time_ms = answer_generation_time_ms + self.total_time_ms = total_time_ms + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TurnEventGenerativeAICalledMetrics': + """Initialize a TurnEventGenerativeAICalledMetrics object from a json dictionary.""" + args = {} + if (search_time_ms := _dict.get('search_time_ms')) is not None: + args['search_time_ms'] = search_time_ms + if (answer_generation_time_ms := + _dict.get('answer_generation_time_ms')) is not None: + args['answer_generation_time_ms'] = answer_generation_time_ms + if (total_time_ms := _dict.get('total_time_ms')) is not None: + args['total_time_ms'] = total_time_ms + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventGenerativeAICalledMetrics object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'search_time_ms') and self.search_time_ms is not None: + _dict['search_time_ms'] = self.search_time_ms + if hasattr(self, 'answer_generation_time_ms' + ) and self.answer_generation_time_ms is not None: + _dict['answer_generation_time_ms'] = self.answer_generation_time_ms + if hasattr(self, 'total_time_ms') and self.total_time_ms is not None: + _dict['total_time_ms'] = self.total_time_ms + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventGenerativeAICalledMetrics object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventGenerativeAICalledMetrics') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventGenerativeAICalledMetrics') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TurnEventNodeSource: + """ + TurnEventNodeSource. + + :param str type: (optional) The type of turn event. + :param str dialog_node: (optional) A dialog node that was visited during + processing of the input message. + :param str title: (optional) The title of the dialog node. + :param str condition: (optional) The condition that triggered the dialog node. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + dialog_node: Optional[str] = None, + title: Optional[str] = None, + condition: Optional[str] = None, + ) -> None: + """ + Initialize a TurnEventNodeSource object. + + :param str type: (optional) The type of turn event. + :param str dialog_node: (optional) A dialog node that was visited during + processing of the input message. + :param str title: (optional) The title of the dialog node. + :param str condition: (optional) The condition that triggered the dialog + node. 
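# --- Editor's note: illustrative usage sketch; not part of the generated diff. ---
# TypeEnum derives from (str, Enum), so the raw `type` string returned by the
# service compares equal to the enum member, and model equality is a plain
# attribute-dict comparison. Identifiers below are invented.
_node_source = TurnEventNodeSource.from_dict({
    'type': 'dialog_node',
    'dialog_node': 'node_1_1510173352382',
    'title': 'Greeting',
})
assert _node_source.type == TurnEventNodeSource.TypeEnum.DIALOG_NODE
assert _node_source == TurnEventNodeSource.from_dict(_node_source.to_dict())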
+ """ + self.type = type + self.dialog_node = dialog_node + self.title = title + self.condition = condition + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TurnEventNodeSource': + """Initialize a TurnEventNodeSource object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (dialog_node := _dict.get('dialog_node')) is not None: + args['dialog_node'] = dialog_node + if (title := _dict.get('title')) is not None: + args['title'] = title + if (condition := _dict.get('condition')) is not None: + args['condition'] = condition + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventNodeSource object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'dialog_node') and self.dialog_node is not None: + _dict['dialog_node'] = self.dialog_node + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'condition') and self.condition is not None: + _dict['condition'] = self.condition + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventNodeSource object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventNodeSource') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventNodeSource') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of turn event. + """ + + DIALOG_NODE = 'dialog_node' + + +class TurnEventSearchError: + """ + TurnEventSearchError. + + :param str message: (optional) Any error message returned by a failed call to a + search skill. + """ + + def __init__( + self, + *, + message: Optional[str] = None, + ) -> None: + """ + Initialize a TurnEventSearchError object. + + :param str message: (optional) Any error message returned by a failed call + to a search skill. 
+ """ + self.message = message + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TurnEventSearchError': + """Initialize a TurnEventSearchError object from a json dictionary.""" + args = {} + if (message := _dict.get('message')) is not None: + args['message'] = message + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventSearchError object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'message') and self.message is not None: + _dict['message'] = self.message + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventSearchError object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventSearchError') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventSearchError') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TurnEventStepSource: + """ + TurnEventStepSource. + + :param str type: (optional) The type of turn event. + :param str action: (optional) An action that was visited during processing of + the message. + :param str action_title: (optional) The title of the action. + :param str step: (optional) A step that was visited during processing of the + message. + :param bool is_ai_guided: (optional) Whether the action that the turn event was + generated from is an AI-guided action. + :param bool is_skill_based: (optional) Whether the action that the turn event + was generated from is a skill-guided action. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + action: Optional[str] = None, + action_title: Optional[str] = None, + step: Optional[str] = None, + is_ai_guided: Optional[bool] = None, + is_skill_based: Optional[bool] = None, + ) -> None: + """ + Initialize a TurnEventStepSource object. + + :param str type: (optional) The type of turn event. + :param str action: (optional) An action that was visited during processing + of the message. + :param str action_title: (optional) The title of the action. + :param str step: (optional) A step that was visited during processing of + the message. + :param bool is_ai_guided: (optional) Whether the action that the turn event + was generated from is an AI-guided action. + :param bool is_skill_based: (optional) Whether the action that the turn + event was generated from is a skill-guided action. 
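# --- Editor's note: illustrative usage sketch; not part of the generated diff. ---
# to_dict() only emits attributes that are set and not None, so a sparsely
# populated TurnEventStepSource serializes back to a sparse dictionary.
# Identifiers below are invented.
_step_source = TurnEventStepSource(type='step', action='action_123', is_ai_guided=True)
assert _step_source.to_dict() == {'type': 'step', 'action': 'action_123', 'is_ai_guided': True}
assert _step_source.step is None  # never set, so it is omitted from to_dict()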
+ """ + self.type = type + self.action = action + self.action_title = action_title + self.step = step + self.is_ai_guided = is_ai_guided + self.is_skill_based = is_skill_based + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TurnEventStepSource': + """Initialize a TurnEventStepSource object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (action := _dict.get('action')) is not None: + args['action'] = action + if (action_title := _dict.get('action_title')) is not None: + args['action_title'] = action_title + if (step := _dict.get('step')) is not None: + args['step'] = step + if (is_ai_guided := _dict.get('is_ai_guided')) is not None: + args['is_ai_guided'] = is_ai_guided + if (is_skill_based := _dict.get('is_skill_based')) is not None: + args['is_skill_based'] = is_skill_based + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TurnEventStepSource object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'action') and self.action is not None: + _dict['action'] = self.action + if hasattr(self, 'action_title') and self.action_title is not None: + _dict['action_title'] = self.action_title + if hasattr(self, 'step') and self.step is not None: + _dict['step'] = self.step + if hasattr(self, 'is_ai_guided') and self.is_ai_guided is not None: + _dict['is_ai_guided'] = self.is_ai_guided + if hasattr(self, 'is_skill_based') and self.is_skill_based is not None: + _dict['is_skill_based'] = self.is_skill_based + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TurnEventStepSource object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TurnEventStepSource') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TurnEventStepSource') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of turn event. + """ + + STEP = 'step' + + +class UpdateEnvironmentOrchestration: + """ + The search skill orchestration settings for the environment. + + :param bool search_skill_fallback: (optional) Whether to fall back to a search + skill when responding to messages that do not match any intent or action defined + in dialog or action skills. (If no search skill is configured for the + environment, this property is ignored.). + """ + + def __init__( + self, + *, + search_skill_fallback: Optional[bool] = None, + ) -> None: + """ + Initialize a UpdateEnvironmentOrchestration object. + + :param bool search_skill_fallback: (optional) Whether to fall back to a + search skill when responding to messages that do not match any intent or + action defined in dialog or action skills. (If no search skill is + configured for the environment, this property is ignored.). 
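# --- Editor's note: illustrative usage sketch; not part of the generated diff. ---
# A minimal sketch of building the orchestration settings and serializing them;
# presumably this dictionary would form the orchestration portion of an
# environment update request (an assumption -- the service call itself is not
# shown in this hunk).
_orchestration = UpdateEnvironmentOrchestration(search_skill_fallback=True)
assert _orchestration.to_dict() == {'search_skill_fallback': True}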
+ """ + self.search_skill_fallback = search_skill_fallback + + @classmethod + def from_dict(cls, _dict: Dict) -> 'UpdateEnvironmentOrchestration': + """Initialize a UpdateEnvironmentOrchestration object from a json dictionary.""" + args = {} + if (search_skill_fallback := + _dict.get('search_skill_fallback')) is not None: + args['search_skill_fallback'] = search_skill_fallback + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a UpdateEnvironmentOrchestration object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'search_skill_fallback' + ) and self.search_skill_fallback is not None: + _dict['search_skill_fallback'] = self.search_skill_fallback + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this UpdateEnvironmentOrchestration object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'UpdateEnvironmentOrchestration') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'UpdateEnvironmentOrchestration') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class UpdateEnvironmentReleaseReference: + """ + An object describing the release that is currently deployed in the environment. + + :param str release: (optional) The name of the deployed release. + """ + + def __init__( + self, + *, + release: Optional[str] = None, + ) -> None: + """ + Initialize a UpdateEnvironmentReleaseReference object. + + :param str release: (optional) The name of the deployed release. + """ + self.release = release + + @classmethod + def from_dict(cls, _dict: Dict) -> 'UpdateEnvironmentReleaseReference': + """Initialize a UpdateEnvironmentReleaseReference object from a json dictionary.""" + args = {} + if (release := _dict.get('release')) is not None: + args['release'] = release + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a UpdateEnvironmentReleaseReference object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'release') and self.release is not None: + _dict['release'] = self.release + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this UpdateEnvironmentReleaseReference object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'UpdateEnvironmentReleaseReference') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'UpdateEnvironmentReleaseReference') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CompleteItem(RuntimeResponseGeneric): + """ + CompleteItem. + + :param Metadata streaming_metadata: + """ + + def __init__( + self, + streaming_metadata: 'Metadata', + ) -> None: + """ + Initialize a CompleteItem object. 
+ + :param Metadata streaming_metadata: + """ + # pylint: disable=super-init-not-called + self.streaming_metadata = streaming_metadata + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CompleteItem': + """Initialize a CompleteItem object from a json dictionary.""" + args = {} + if (streaming_metadata := _dict.get('streaming_metadata')) is not None: + args['streaming_metadata'] = Metadata.from_dict(streaming_metadata) + else: + raise ValueError( + 'Required property \'streaming_metadata\' not present in CompleteItem JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CompleteItem object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr( + self, + 'streaming_metadata') and self.streaming_metadata is not None: + if isinstance(self.streaming_metadata, dict): + _dict['streaming_metadata'] = self.streaming_metadata + else: + _dict['streaming_metadata'] = self.streaming_metadata.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CompleteItem object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CompleteItem') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CompleteItem') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class GenerativeAITaskContentGroundedAnswering(GenerativeAITask): + """ + GenerativeAITaskContentGroundedAnswering. + + :param str task: (optional) The type of generative ai task. + :param bool is_idk_response: (optional) Whether response was an idk response. + :param bool is_hap_detected: (optional) Whether response was a hap response. + :param GenerativeAITaskConfidenceScores confidence_scores: (optional) The + confidence scores for determining whether to show the generated response or an + “I don't know” response. + :param str original_response: (optional) The original response returned by the + generative ai. + :param str inferred_query: (optional) Generated from the input text after + auto-correction. If this field is not present, the input text was used as the + query to the generative ai. + """ + + def __init__( + self, + *, + task: Optional[str] = None, + is_idk_response: Optional[bool] = None, + is_hap_detected: Optional[bool] = None, + confidence_scores: Optional['GenerativeAITaskConfidenceScores'] = None, + original_response: Optional[str] = None, + inferred_query: Optional[str] = None, + ) -> None: + """ + Initialize a GenerativeAITaskContentGroundedAnswering object. + + :param str task: (optional) The type of generative ai task. + :param bool is_idk_response: (optional) Whether response was an idk + response. + :param bool is_hap_detected: (optional) Whether response was a hap + response. + :param GenerativeAITaskConfidenceScores confidence_scores: (optional) The + confidence scores for determining whether to show the generated response or + an “I don't know” response. + :param str original_response: (optional) The original response returned by + the generative ai. + :param str inferred_query: (optional) Generated from the input text after + auto-correction. 
If this field is not present, the input text was used as + the query to the generative ai. + """ + # pylint: disable=super-init-not-called + self.task = task + self.is_idk_response = is_idk_response + self.is_hap_detected = is_hap_detected + self.confidence_scores = confidence_scores + self.original_response = original_response + self.inferred_query = inferred_query + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'GenerativeAITaskContentGroundedAnswering': + """Initialize a GenerativeAITaskContentGroundedAnswering object from a json dictionary.""" + args = {} + if (task := _dict.get('task')) is not None: + args['task'] = task + if (is_idk_response := _dict.get('is_idk_response')) is not None: + args['is_idk_response'] = is_idk_response + if (is_hap_detected := _dict.get('is_hap_detected')) is not None: + args['is_hap_detected'] = is_hap_detected + if (confidence_scores := _dict.get('confidence_scores')) is not None: + args[ + 'confidence_scores'] = GenerativeAITaskConfidenceScores.from_dict( + confidence_scores) + if (original_response := _dict.get('original_response')) is not None: + args['original_response'] = original_response + if (inferred_query := _dict.get('inferred_query')) is not None: + args['inferred_query'] = inferred_query + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a GenerativeAITaskContentGroundedAnswering object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'task') and self.task is not None: + _dict['task'] = self.task + if hasattr(self, + 'is_idk_response') and self.is_idk_response is not None: + _dict['is_idk_response'] = self.is_idk_response + if hasattr(self, + 'is_hap_detected') and self.is_hap_detected is not None: + _dict['is_hap_detected'] = self.is_hap_detected + if hasattr(self, + 'confidence_scores') and self.confidence_scores is not None: + if isinstance(self.confidence_scores, dict): + _dict['confidence_scores'] = self.confidence_scores + else: + _dict['confidence_scores'] = self.confidence_scores.to_dict() + if hasattr(self, + 'original_response') and self.original_response is not None: + _dict['original_response'] = self.original_response + if hasattr(self, 'inferred_query') and self.inferred_query is not None: + _dict['inferred_query'] = self.inferred_query + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this GenerativeAITaskContentGroundedAnswering object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'GenerativeAITaskContentGroundedAnswering') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'GenerativeAITaskContentGroundedAnswering') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class GenerativeAITaskGeneralPurposeAnswering(GenerativeAITask): + """ + GenerativeAITaskGeneralPurposeAnswering. + + :param str task: (optional) The type of generative ai task. + :param bool is_idk_response: (optional) Whether response was an idk response. + :param bool is_hap_detected: (optional) Whether response was a hap response. 
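# --- Editor's note: illustrative usage sketch; not part of the generated diff. ---
# When is_idk_response is set, the text the model originally produced is still
# carried in original_response, as described above. The payload is invented,
# and the nested confidence_scores structure (defined elsewhere in this module)
# is omitted here.
_answer = GenerativeAITaskContentGroundedAnswering.from_dict({
    'task': 'content_grounded_answering',
    'is_idk_response': True,
    'is_hap_detected': False,
    'original_response': 'The store opens at 9am.',
})
if _answer.is_idk_response:
    _suppressed_text = _answer.original_response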
+ """ + + def __init__( + self, + *, + task: Optional[str] = None, + is_idk_response: Optional[bool] = None, + is_hap_detected: Optional[bool] = None, + ) -> None: + """ + Initialize a GenerativeAITaskGeneralPurposeAnswering object. + + :param str task: (optional) The type of generative ai task. + :param bool is_idk_response: (optional) Whether response was an idk + response. + :param bool is_hap_detected: (optional) Whether response was a hap + response. + """ + # pylint: disable=super-init-not-called + self.task = task + self.is_idk_response = is_idk_response + self.is_hap_detected = is_hap_detected + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'GenerativeAITaskGeneralPurposeAnswering': + """Initialize a GenerativeAITaskGeneralPurposeAnswering object from a json dictionary.""" + args = {} + if (task := _dict.get('task')) is not None: + args['task'] = task + if (is_idk_response := _dict.get('is_idk_response')) is not None: + args['is_idk_response'] = is_idk_response + if (is_hap_detected := _dict.get('is_hap_detected')) is not None: + args['is_hap_detected'] = is_hap_detected + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a GenerativeAITaskGeneralPurposeAnswering object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'task') and self.task is not None: + _dict['task'] = self.task + if hasattr(self, + 'is_idk_response') and self.is_idk_response is not None: + _dict['is_idk_response'] = self.is_idk_response + if hasattr(self, + 'is_hap_detected') and self.is_hap_detected is not None: + _dict['is_hap_detected'] = self.is_hap_detected + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this GenerativeAITaskGeneralPurposeAnswering object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'GenerativeAITaskGeneralPurposeAnswering') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'GenerativeAITaskGeneralPurposeAnswering') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LogMessageSourceAction(LogMessageSource): + """ + An object that identifies the dialog element that generated the error message. + + :param str type: A string that indicates the type of dialog element that + generated the error message. + :param str action: The unique identifier of the action that generated the error + message. + """ + + def __init__( + self, + type: str, + action: str, + ) -> None: + """ + Initialize a LogMessageSourceAction object. + + :param str type: A string that indicates the type of dialog element that + generated the error message. + :param str action: The unique identifier of the action that generated the + error message. 
+ """ + # pylint: disable=super-init-not-called + self.type = type + self.action = action + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogMessageSourceAction': + """Initialize a LogMessageSourceAction object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + else: + raise ValueError( + 'Required property \'type\' not present in LogMessageSourceAction JSON' + ) + if (action := _dict.get('action')) is not None: + args['action'] = action + else: + raise ValueError( + 'Required property \'action\' not present in LogMessageSourceAction JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogMessageSourceAction object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'action') and self.action is not None: + _dict['action'] = self.action + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogMessageSourceAction object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogMessageSourceAction') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogMessageSourceAction') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LogMessageSourceDialogNode(LogMessageSource): + """ + An object that identifies the dialog element that generated the error message. + + :param str type: A string that indicates the type of dialog element that + generated the error message. + :param str dialog_node: The unique identifier of the dialog node that generated + the error message. + """ + + def __init__( + self, + type: str, + dialog_node: str, + ) -> None: + """ + Initialize a LogMessageSourceDialogNode object. + + :param str type: A string that indicates the type of dialog element that + generated the error message. + :param str dialog_node: The unique identifier of the dialog node that + generated the error message. 
+ """ + # pylint: disable=super-init-not-called + self.type = type + self.dialog_node = dialog_node + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogMessageSourceDialogNode': + """Initialize a LogMessageSourceDialogNode object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + else: + raise ValueError( + 'Required property \'type\' not present in LogMessageSourceDialogNode JSON' + ) + if (dialog_node := _dict.get('dialog_node')) is not None: + args['dialog_node'] = dialog_node + else: + raise ValueError( + 'Required property \'dialog_node\' not present in LogMessageSourceDialogNode JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogMessageSourceDialogNode object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'dialog_node') and self.dialog_node is not None: + _dict['dialog_node'] = self.dialog_node + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogMessageSourceDialogNode object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogMessageSourceDialogNode') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogMessageSourceDialogNode') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LogMessageSourceHandler(LogMessageSource): + """ + An object that identifies the dialog element that generated the error message. + + :param str type: A string that indicates the type of dialog element that + generated the error message. + :param str action: The unique identifier of the action that generated the error + message. + :param str step: (optional) The unique identifier of the step that generated the + error message. + :param str handler: The unique identifier of the handler that generated the + error message. + """ + + def __init__( + self, + type: str, + action: str, + handler: str, + *, + step: Optional[str] = None, + ) -> None: + """ + Initialize a LogMessageSourceHandler object. + + :param str type: A string that indicates the type of dialog element that + generated the error message. + :param str action: The unique identifier of the action that generated the + error message. + :param str handler: The unique identifier of the handler that generated the + error message. + :param str step: (optional) The unique identifier of the step that + generated the error message. 
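# --- Editor's note: illustrative usage sketch; not part of the generated diff. ---
# type, action, and handler are required on LogMessageSourceHandler, so
# from_dict() raises ValueError when any of them is missing, while step stays
# optional. Identifiers below are invented.
try:
    LogMessageSourceHandler.from_dict({'type': 'handler', 'action': 'action_123'})
except ValueError:
    pass  # rejected: required property 'handler' is absent
_handler_source = LogMessageSourceHandler.from_dict(
    {'type': 'handler', 'action': 'action_123', 'handler': 'handler_456'})
assert _handler_source.step is None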
+ """ + # pylint: disable=super-init-not-called + self.type = type + self.action = action + self.step = step + self.handler = handler + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogMessageSourceHandler': + """Initialize a LogMessageSourceHandler object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + else: + raise ValueError( + 'Required property \'type\' not present in LogMessageSourceHandler JSON' + ) + if (action := _dict.get('action')) is not None: + args['action'] = action + else: + raise ValueError( + 'Required property \'action\' not present in LogMessageSourceHandler JSON' + ) + if (step := _dict.get('step')) is not None: + args['step'] = step + if (handler := _dict.get('handler')) is not None: + args['handler'] = handler + else: + raise ValueError( + 'Required property \'handler\' not present in LogMessageSourceHandler JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogMessageSourceHandler object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'action') and self.action is not None: + _dict['action'] = self.action + if hasattr(self, 'step') and self.step is not None: + _dict['step'] = self.step + if hasattr(self, 'handler') and self.handler is not None: + _dict['handler'] = self.handler + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogMessageSourceHandler object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogMessageSourceHandler') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogMessageSourceHandler') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LogMessageSourceStep(LogMessageSource): + """ + An object that identifies the dialog element that generated the error message. + + :param str type: A string that indicates the type of dialog element that + generated the error message. + :param str action: The unique identifier of the action that generated the error + message. + :param str step: The unique identifier of the step that generated the error + message. + """ + + def __init__( + self, + type: str, + action: str, + step: str, + ) -> None: + """ + Initialize a LogMessageSourceStep object. + + :param str type: A string that indicates the type of dialog element that + generated the error message. + :param str action: The unique identifier of the action that generated the + error message. + :param str step: The unique identifier of the step that generated the error + message. 
+ """ + # pylint: disable=super-init-not-called + self.type = type + self.action = action + self.step = step + + @classmethod + def from_dict(cls, _dict: Dict) -> 'LogMessageSourceStep': + """Initialize a LogMessageSourceStep object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + else: + raise ValueError( + 'Required property \'type\' not present in LogMessageSourceStep JSON' + ) + if (action := _dict.get('action')) is not None: + args['action'] = action + else: + raise ValueError( + 'Required property \'action\' not present in LogMessageSourceStep JSON' + ) + if (step := _dict.get('step')) is not None: + args['step'] = step + else: + raise ValueError( + 'Required property \'step\' not present in LogMessageSourceStep JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LogMessageSourceStep object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'action') and self.action is not None: + _dict['action'] = self.action + if hasattr(self, 'step') and self.step is not None: + _dict['step'] = self.step + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LogMessageSourceStep object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LogMessageSourceStep') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LogMessageSourceStep') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageOutputDebugTurnEventTurnEventActionFinished( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventActionFinished. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str action_start_time: (optional) The time when the action started + processing the message. + :param str condition_type: (optional) The type of condition (if any) that is + defined for the action. + :param str reason: (optional) The reason the action finished processing. + :param dict action_variables: (optional) The state of all action variables at + the time the action finished. + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventActionSource'] = None, + action_start_time: Optional[str] = None, + condition_type: Optional[str] = None, + reason: Optional[str] = None, + action_variables: Optional[dict] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventActionFinished object. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str action_start_time: (optional) The time when the action started + processing the message. + :param str condition_type: (optional) The type of condition (if any) that + is defined for the action. + :param str reason: (optional) The reason the action finished processing. + :param dict action_variables: (optional) The state of all action variables + at the time the action finished. 
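# --- Editor's note: illustrative usage sketch; not part of the generated diff. ---
# A sketch of reading an action_finished debug event: reason compares directly
# against the (str, Enum) ReasonEnum members declared further down in this
# class, and action_variables carries the final variable state as a plain dict.
# The payload is invented and omits 'source' (TurnEventActionSource is defined
# elsewhere in this module).
_finished = MessageOutputDebugTurnEventTurnEventActionFinished.from_dict({
    'event': 'action_finished',
    'reason': 'all_steps_done',
    'action_variables': {'order_id': '42'},
})
if _finished.reason == MessageOutputDebugTurnEventTurnEventActionFinished.ReasonEnum.ALL_STEPS_DONE:
    _final_state = _finished.action_variables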
+ """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.action_start_time = action_start_time + self.condition_type = condition_type + self.reason = reason + self.action_variables = action_variables + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'MessageOutputDebugTurnEventTurnEventActionFinished': + """Initialize a MessageOutputDebugTurnEventTurnEventActionFinished object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventActionSource.from_dict(source) + if (action_start_time := _dict.get('action_start_time')) is not None: + args['action_start_time'] = action_start_time + if (condition_type := _dict.get('condition_type')) is not None: + args['condition_type'] = condition_type + if (reason := _dict.get('reason')) is not None: + args['reason'] = reason + if (action_variables := _dict.get('action_variables')) is not None: + args['action_variables'] = action_variables + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventActionFinished object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, + 'action_start_time') and self.action_start_time is not None: + _dict['action_start_time'] = self.action_start_time + if hasattr(self, 'condition_type') and self.condition_type is not None: + _dict['condition_type'] = self.condition_type + if hasattr(self, 'reason') and self.reason is not None: + _dict['reason'] = self.reason + if hasattr(self, + 'action_variables') and self.action_variables is not None: + _dict['action_variables'] = self.action_variables + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventActionFinished object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'MessageOutputDebugTurnEventTurnEventActionFinished' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: 'MessageOutputDebugTurnEventTurnEventActionFinished' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ConditionTypeEnum(str, Enum): + """ + The type of condition (if any) that is defined for the action. + """ + + USER_DEFINED = 'user_defined' + WELCOME = 'welcome' + ANYTHING_ELSE = 'anything_else' + + class ReasonEnum(str, Enum): + """ + The reason the action finished processing. 
+ """ + + ALL_STEPS_DONE = 'all_steps_done' + NO_STEPS_VISITED = 'no_steps_visited' + ENDED_BY_STEP = 'ended_by_step' + CONNECT_TO_AGENT = 'connect_to_agent' + MAX_RETRIES_REACHED = 'max_retries_reached' + FALLBACK = 'fallback' + + +class MessageOutputDebugTurnEventTurnEventActionRoutingDenied( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventActionRoutingDenied. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that is + defined for the action. + :param str reason: (optional) The reason the action was visited. + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventActionSource'] = None, + condition_type: Optional[str] = None, + reason: Optional[str] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventActionRoutingDenied object. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that + is defined for the action. + :param str reason: (optional) The reason the action was visited. + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.condition_type = condition_type + self.reason = reason + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'MessageOutputDebugTurnEventTurnEventActionRoutingDenied': + """Initialize a MessageOutputDebugTurnEventTurnEventActionRoutingDenied object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventActionSource.from_dict(source) + if (condition_type := _dict.get('condition_type')) is not None: + args['condition_type'] = condition_type + if (reason := _dict.get('reason')) is not None: + args['reason'] = reason + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventActionRoutingDenied object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, 'condition_type') and self.condition_type is not None: + _dict['condition_type'] = self.condition_type + if hasattr(self, 'reason') and self.reason is not None: + _dict['reason'] = self.reason + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventActionRoutingDenied object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'MessageOutputDebugTurnEventTurnEventActionRoutingDenied' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: 'MessageOutputDebugTurnEventTurnEventActionRoutingDenied' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == 
other + + class ConditionTypeEnum(str, Enum): + """ + The type of condition (if any) that is defined for the action. + """ + + USER_DEFINED = 'user_defined' + WELCOME = 'welcome' + ANYTHING_ELSE = 'anything_else' + + class ReasonEnum(str, Enum): + """ + The reason the action was visited. + """ + + ACTION_CONDITIONS_FAILED = 'action_conditions_failed' + + +class MessageOutputDebugTurnEventTurnEventActionVisited( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventActionVisited. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str action_start_time: (optional) The time when the action started + processing the message. + :param str condition_type: (optional) The type of condition (if any) that is + defined for the action. + :param str reason: (optional) The reason the action was visited. + :param str result_variable: (optional) The variable where the result of the call + to the action is stored. Included only if **reason**=`subaction_return`. + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventActionSource'] = None, + action_start_time: Optional[str] = None, + condition_type: Optional[str] = None, + reason: Optional[str] = None, + result_variable: Optional[str] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventActionVisited object. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str action_start_time: (optional) The time when the action started + processing the message. + :param str condition_type: (optional) The type of condition (if any) that + is defined for the action. + :param str reason: (optional) The reason the action was visited. + :param str result_variable: (optional) The variable where the result of the + call to the action is stored. Included only if + **reason**=`subaction_return`. 
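# --- Editor's note: illustrative usage sketch; not part of the generated diff. ---
# Per the docstring above, result_variable is only included when
# reason == 'subaction_return', so check the reason before reading it. The
# payload values are invented.
_visited = MessageOutputDebugTurnEventTurnEventActionVisited.from_dict({
    'event': 'action_visited',
    'reason': 'subaction_return',
    'result_variable': 'step_123_result',
})
if _visited.reason == MessageOutputDebugTurnEventTurnEventActionVisited.ReasonEnum.SUBACTION_RETURN:
    _result_variable_name = _visited.result_variable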
+ """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.action_start_time = action_start_time + self.condition_type = condition_type + self.reason = reason + self.result_variable = result_variable + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'MessageOutputDebugTurnEventTurnEventActionVisited': + """Initialize a MessageOutputDebugTurnEventTurnEventActionVisited object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventActionSource.from_dict(source) + if (action_start_time := _dict.get('action_start_time')) is not None: + args['action_start_time'] = action_start_time + if (condition_type := _dict.get('condition_type')) is not None: + args['condition_type'] = condition_type + if (reason := _dict.get('reason')) is not None: + args['reason'] = reason + if (result_variable := _dict.get('result_variable')) is not None: + args['result_variable'] = result_variable + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventActionVisited object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, + 'action_start_time') and self.action_start_time is not None: + _dict['action_start_time'] = self.action_start_time + if hasattr(self, 'condition_type') and self.condition_type is not None: + _dict['condition_type'] = self.condition_type + if hasattr(self, 'reason') and self.reason is not None: + _dict['reason'] = self.reason + if hasattr(self, + 'result_variable') and self.result_variable is not None: + _dict['result_variable'] = self.result_variable + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventActionVisited object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'MessageOutputDebugTurnEventTurnEventActionVisited') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, + other: 'MessageOutputDebugTurnEventTurnEventActionVisited') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ConditionTypeEnum(str, Enum): + """ + The type of condition (if any) that is defined for the action. + """ + + USER_DEFINED = 'user_defined' + WELCOME = 'welcome' + ANYTHING_ELSE = 'anything_else' + + class ReasonEnum(str, Enum): + """ + The reason the action was visited. 
+ """ + + INTENT = 'intent' + INVOKE_SUBACTION = 'invoke_subaction' + SUBACTION_RETURN = 'subaction_return' + INVOKE_EXTERNAL = 'invoke_external' + TOPIC_SWITCH = 'topic_switch' + TOPIC_RETURN = 'topic_return' + AGENT_REQUESTED = 'agent_requested' + STEP_VALIDATION_FAILED = 'step_validation_failed' + NO_ACTION_MATCHES = 'no_action_matches' + + +class MessageOutputDebugTurnEventTurnEventCallout(MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventCallout. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param TurnEventCalloutCallout callout: (optional) + :param TurnEventCalloutError error: (optional) + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventActionSource'] = None, + callout: Optional['TurnEventCalloutCallout'] = None, + error: Optional['TurnEventCalloutError'] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventCallout object. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param TurnEventCalloutCallout callout: (optional) + :param TurnEventCalloutError error: (optional) + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.callout = callout + self.error = error + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'MessageOutputDebugTurnEventTurnEventCallout': + """Initialize a MessageOutputDebugTurnEventTurnEventCallout object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventActionSource.from_dict(source) + if (callout := _dict.get('callout')) is not None: + args['callout'] = TurnEventCalloutCallout.from_dict(callout) + if (error := _dict.get('error')) is not None: + args['error'] = TurnEventCalloutError.from_dict(error) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventCallout object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, 'callout') and self.callout is not None: + if isinstance(self.callout, dict): + _dict['callout'] = self.callout + else: + _dict['callout'] = self.callout.to_dict() + if hasattr(self, 'error') and self.error is not None: + if isinstance(self.error, dict): + _dict['error'] = self.error + else: + _dict['error'] = self.error.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventCallout object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'MessageOutputDebugTurnEventTurnEventCallout') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'MessageOutputDebugTurnEventTurnEventCallout') -> bool: + """Return `true` when self and other are not equal, 
false otherwise.""" + return not self == other + + +class MessageOutputDebugTurnEventTurnEventClientActions( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventClientActions. + + :param str event: (optional) The type of turn event. + :param TurnEventStepSource source: (optional) + :param List[ClientAction] client_actions: (optional) An array of client actions. + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventStepSource'] = None, + client_actions: Optional[List['ClientAction']] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventClientActions object. + + :param str event: (optional) The type of turn event. + :param TurnEventStepSource source: (optional) + :param List[ClientAction] client_actions: (optional) An array of client + actions. + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.client_actions = client_actions + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'MessageOutputDebugTurnEventTurnEventClientActions': + """Initialize a MessageOutputDebugTurnEventTurnEventClientActions object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventStepSource.from_dict(source) + if (client_actions := _dict.get('client_actions')) is not None: + args['client_actions'] = [ + ClientAction.from_dict(v) for v in client_actions + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventClientActions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, 'client_actions') and self.client_actions is not None: + client_actions_list = [] + for v in self.client_actions: + if isinstance(v, dict): + client_actions_list.append(v) + else: + client_actions_list.append(v.to_dict()) + _dict['client_actions'] = client_actions_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventClientActions object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'MessageOutputDebugTurnEventTurnEventClientActions') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, + other: 'MessageOutputDebugTurnEventTurnEventClientActions') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageOutputDebugTurnEventTurnEventConversationalSearchEnd( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventConversationalSearchEnd. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that is + defined for the action. 
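# --- Editor's note: illustrative usage sketch; not part of the generated diff. ---
# to_dict() on MessageOutputDebugTurnEventTurnEventClientActions accepts either
# ClientAction models or plain dicts in client_actions and serializes both.
# ClientAction is defined elsewhere in this module, so a plain dict with an
# invented shape stands in for it here.
_client_event = MessageOutputDebugTurnEventTurnEventClientActions(
    event='client_actions',  # invented event label
    client_actions=[{'type': 'web_action'}],  # placeholder dict, not a real ClientAction shape
)
assert _client_event.to_dict()['client_actions'] == [{'type': 'web_action'}]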
+ """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventActionSource'] = None, + condition_type: Optional[str] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventConversationalSearchEnd object. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that + is defined for the action. + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.condition_type = condition_type + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'MessageOutputDebugTurnEventTurnEventConversationalSearchEnd': + """Initialize a MessageOutputDebugTurnEventTurnEventConversationalSearchEnd object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventActionSource.from_dict(source) + if (condition_type := _dict.get('condition_type')) is not None: + args['condition_type'] = condition_type + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventConversationalSearchEnd object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, 'condition_type') and self.condition_type is not None: + _dict['condition_type'] = self.condition_type + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventConversationalSearchEnd object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'MessageOutputDebugTurnEventTurnEventConversationalSearchEnd' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, + other: 'MessageOutputDebugTurnEventTurnEventConversationalSearchEnd' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ConditionTypeEnum(str, Enum): + """ + The type of condition (if any) that is defined for the action. + """ + + USER_DEFINED = 'user_defined' + WELCOME = 'welcome' + ANYTHING_ELSE = 'anything_else' + + +class MessageOutputDebugTurnEventTurnEventGenerativeAICalled( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventGenerativeAICalled. + + :param str event: (optional) The type of turn event. + :param dict source: (optional) For internal use only. + :param str generative_ai_start_time: (optional) The time when generative ai + started processing the message. 
+ :param GenerativeAITask generative_ai: (optional) + :param TurnEventGenerativeAICalledCallout callout: (optional) + :param TurnEventGenerativeAICalledMetrics metrics: (optional) + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional[dict] = None, + generative_ai_start_time: Optional[str] = None, + generative_ai: Optional['GenerativeAITask'] = None, + callout: Optional['TurnEventGenerativeAICalledCallout'] = None, + metrics: Optional['TurnEventGenerativeAICalledMetrics'] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventGenerativeAICalled object. + + :param str event: (optional) The type of turn event. + :param dict source: (optional) For internal use only. + :param str generative_ai_start_time: (optional) The time when generative ai + started processing the message. + :param GenerativeAITask generative_ai: (optional) + :param TurnEventGenerativeAICalledCallout callout: (optional) + :param TurnEventGenerativeAICalledMetrics metrics: (optional) + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.generative_ai_start_time = generative_ai_start_time + self.generative_ai = generative_ai + self.callout = callout + self.metrics = metrics + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'MessageOutputDebugTurnEventTurnEventGenerativeAICalled': + """Initialize a MessageOutputDebugTurnEventTurnEventGenerativeAICalled object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = source + if (generative_ai_start_time := + _dict.get('generative_ai_start_time')) is not None: + args['generative_ai_start_time'] = generative_ai_start_time + if (generative_ai := _dict.get('generative_ai')) is not None: + args['generative_ai'] = GenerativeAITask.from_dict(generative_ai) + if (callout := _dict.get('callout')) is not None: + args['callout'] = TurnEventGenerativeAICalledCallout.from_dict( + callout) + if (metrics := _dict.get('metrics')) is not None: + args['metrics'] = TurnEventGenerativeAICalledMetrics.from_dict( + metrics) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventGenerativeAICalled object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'generative_ai_start_time' + ) and self.generative_ai_start_time is not None: + _dict['generative_ai_start_time'] = self.generative_ai_start_time + if hasattr(self, 'generative_ai') and self.generative_ai is not None: + if isinstance(self.generative_ai, dict): + _dict['generative_ai'] = self.generative_ai + else: + _dict['generative_ai'] = self.generative_ai.to_dict() + if hasattr(self, 'callout') and self.callout is not None: + if isinstance(self.callout, dict): + _dict['callout'] = self.callout + else: + _dict['callout'] = self.callout.to_dict() + if hasattr(self, 'metrics') and self.metrics is not None: + if isinstance(self.metrics, dict): + _dict['metrics'] = self.metrics + else: + _dict['metrics'] = self.metrics.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def 
__str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventGenerativeAICalled object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'MessageOutputDebugTurnEventTurnEventGenerativeAICalled' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: 'MessageOutputDebugTurnEventTurnEventGenerativeAICalled' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageOutputDebugTurnEventTurnEventHandlerVisited( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventHandlerVisited. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str action_start_time: (optional) The time when the action started + processing the message. + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventActionSource'] = None, + action_start_time: Optional[str] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventHandlerVisited object. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str action_start_time: (optional) The time when the action started + processing the message. + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.action_start_time = action_start_time + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'MessageOutputDebugTurnEventTurnEventHandlerVisited': + """Initialize a MessageOutputDebugTurnEventTurnEventHandlerVisited object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventActionSource.from_dict(source) + if (action_start_time := _dict.get('action_start_time')) is not None: + args['action_start_time'] = action_start_time + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventHandlerVisited object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, + 'action_start_time') and self.action_start_time is not None: + _dict['action_start_time'] = self.action_start_time + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventHandlerVisited object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'MessageOutputDebugTurnEventTurnEventHandlerVisited' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: 'MessageOutputDebugTurnEventTurnEventHandlerVisited' + ) -> bool: + """Return `true` when self and other are not equal, 
false otherwise.""" + return not self == other + + +class MessageOutputDebugTurnEventTurnEventManualRoute( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventManualRoute. + + :param str event: (optional) The type of turn event. + :param TurnEventStepSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that is + defined for the action. + :param str action_start_time: (optional) The time when the action started + processing the message. + :param str route_name: (optional) The name of the route. + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventStepSource'] = None, + condition_type: Optional[str] = None, + action_start_time: Optional[str] = None, + route_name: Optional[str] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventManualRoute object. + + :param str event: (optional) The type of turn event. + :param TurnEventStepSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that + is defined for the action. + :param str action_start_time: (optional) The time when the action started + processing the message. + :param str route_name: (optional) The name of the route. + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.condition_type = condition_type + self.action_start_time = action_start_time + self.route_name = route_name + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'MessageOutputDebugTurnEventTurnEventManualRoute': + """Initialize a MessageOutputDebugTurnEventTurnEventManualRoute object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventStepSource.from_dict(source) + if (condition_type := _dict.get('condition_type')) is not None: + args['condition_type'] = condition_type + if (action_start_time := _dict.get('action_start_time')) is not None: + args['action_start_time'] = action_start_time + if (route_name := _dict.get('route_name')) is not None: + args['route_name'] = route_name + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventManualRoute object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, 'condition_type') and self.condition_type is not None: + _dict['condition_type'] = self.condition_type + if hasattr(self, + 'action_start_time') and self.action_start_time is not None: + _dict['action_start_time'] = self.action_start_time + if hasattr(self, 'route_name') and self.route_name is not None: + _dict['route_name'] = self.route_name + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventManualRoute object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'MessageOutputDebugTurnEventTurnEventManualRoute') -> bool: + """Return `true` when self and other are equal, 
false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, + other: 'MessageOutputDebugTurnEventTurnEventManualRoute') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ConditionTypeEnum(str, Enum): + """ + The type of condition (if any) that is defined for the action. + """ + + USER_DEFINED = 'user_defined' + WELCOME = 'welcome' + ANYTHING_ELSE = 'anything_else' + + +class MessageOutputDebugTurnEventTurnEventNodeVisited( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventNodeVisited. + + :param str event: (optional) The type of turn event. + :param TurnEventNodeSource source: (optional) + :param str reason: (optional) The reason the dialog node was visited. + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventNodeSource'] = None, + reason: Optional[str] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventNodeVisited object. + + :param str event: (optional) The type of turn event. + :param TurnEventNodeSource source: (optional) + :param str reason: (optional) The reason the dialog node was visited. + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.reason = reason + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'MessageOutputDebugTurnEventTurnEventNodeVisited': + """Initialize a MessageOutputDebugTurnEventTurnEventNodeVisited object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventNodeSource.from_dict(source) + if (reason := _dict.get('reason')) is not None: + args['reason'] = reason + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventNodeVisited object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, 'reason') and self.reason is not None: + _dict['reason'] = self.reason + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventNodeVisited object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'MessageOutputDebugTurnEventTurnEventNodeVisited') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, + other: 'MessageOutputDebugTurnEventTurnEventNodeVisited') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ReasonEnum(str, Enum): + """ + The reason the dialog node was visited. 
+ """ + + WELCOME = 'welcome' + BRANCH_START = 'branch_start' + TOPIC_SWITCH = 'topic_switch' + TOPIC_RETURN = 'topic_return' + TOPIC_SWITCH_WITHOUT_RETURN = 'topic_switch_without_return' + JUMP = 'jump' + + +class MessageOutputDebugTurnEventTurnEventSearch(MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventSearch. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param TurnEventSearchError error: (optional) + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventActionSource'] = None, + error: Optional['TurnEventSearchError'] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventSearch object. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param TurnEventSearchError error: (optional) + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.error = error + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'MessageOutputDebugTurnEventTurnEventSearch': + """Initialize a MessageOutputDebugTurnEventTurnEventSearch object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventActionSource.from_dict(source) + if (error := _dict.get('error')) is not None: + args['error'] = TurnEventSearchError.from_dict(error) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventSearch object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, 'error') and self.error is not None: + if isinstance(self.error, dict): + _dict['error'] = self.error + else: + _dict['error'] = self.error.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventSearch object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'MessageOutputDebugTurnEventTurnEventSearch') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'MessageOutputDebugTurnEventTurnEventSearch') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageOutputDebugTurnEventTurnEventStepAnswered( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventStepAnswered. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that is + defined for the action. + :param str action_start_time: (optional) The time when the action started + processing the message. + :param bool prompted: (optional) Whether the step was answered in response to a + prompt from the assistant. 
If this property is `false`, the user provided the + answer without visiting the step. + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventActionSource'] = None, + condition_type: Optional[str] = None, + action_start_time: Optional[str] = None, + prompted: Optional[bool] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventStepAnswered object. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that + is defined for the action. + :param str action_start_time: (optional) The time when the action started + processing the message. + :param bool prompted: (optional) Whether the step was answered in response + to a prompt from the assistant. If this property is `false`, the user + provided the answer without visiting the step. + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.condition_type = condition_type + self.action_start_time = action_start_time + self.prompted = prompted + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'MessageOutputDebugTurnEventTurnEventStepAnswered': + """Initialize a MessageOutputDebugTurnEventTurnEventStepAnswered object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventActionSource.from_dict(source) + if (condition_type := _dict.get('condition_type')) is not None: + args['condition_type'] = condition_type + if (action_start_time := _dict.get('action_start_time')) is not None: + args['action_start_time'] = action_start_time + if (prompted := _dict.get('prompted')) is not None: + args['prompted'] = prompted + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventStepAnswered object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, 'condition_type') and self.condition_type is not None: + _dict['condition_type'] = self.condition_type + if hasattr(self, + 'action_start_time') and self.action_start_time is not None: + _dict['action_start_time'] = self.action_start_time + if hasattr(self, 'prompted') and self.prompted is not None: + _dict['prompted'] = self.prompted + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventStepAnswered object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'MessageOutputDebugTurnEventTurnEventStepAnswered') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, + other: 'MessageOutputDebugTurnEventTurnEventStepAnswered') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ConditionTypeEnum(str, 
Enum): + """ + The type of condition (if any) that is defined for the action. + """ + + USER_DEFINED = 'user_defined' + WELCOME = 'welcome' + ANYTHING_ELSE = 'anything_else' + + +class MessageOutputDebugTurnEventTurnEventStepVisited( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventStepVisited. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that is + defined for the action. + :param str action_start_time: (optional) The time when the action started + processing the message. + :param bool has_question: (optional) Whether the step collects a customer + response. + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventActionSource'] = None, + condition_type: Optional[str] = None, + action_start_time: Optional[str] = None, + has_question: Optional[bool] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventStepVisited object. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that + is defined for the action. + :param str action_start_time: (optional) The time when the action started + processing the message. + :param bool has_question: (optional) Whether the step collects a customer + response. + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.condition_type = condition_type + self.action_start_time = action_start_time + self.has_question = has_question + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'MessageOutputDebugTurnEventTurnEventStepVisited': + """Initialize a MessageOutputDebugTurnEventTurnEventStepVisited object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventActionSource.from_dict(source) + if (condition_type := _dict.get('condition_type')) is not None: + args['condition_type'] = condition_type + if (action_start_time := _dict.get('action_start_time')) is not None: + args['action_start_time'] = action_start_time + if (has_question := _dict.get('has_question')) is not None: + args['has_question'] = has_question + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventStepVisited object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, 'condition_type') and self.condition_type is not None: + _dict['condition_type'] = self.condition_type + if hasattr(self, + 'action_start_time') and self.action_start_time is not None: + _dict['action_start_time'] = self.action_start_time + if hasattr(self, 'has_question') and self.has_question is not None: + _dict['has_question'] = self.has_question + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this 
MessageOutputDebugTurnEventTurnEventStepVisited object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'MessageOutputDebugTurnEventTurnEventStepVisited') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, + other: 'MessageOutputDebugTurnEventTurnEventStepVisited') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ConditionTypeEnum(str, Enum): + """ + The type of condition (if any) that is defined for the action. + """ + + USER_DEFINED = 'user_defined' + WELCOME = 'welcome' + ANYTHING_ELSE = 'anything_else' + + +class MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied. + + :param str event: (optional) The type of turn event. + :param List[RuntimeIntent] intents_denied: (optional) An array of denied + intents. + """ + + def __init__( + self, + *, + event: Optional[str] = None, + intents_denied: Optional[List['RuntimeIntent']] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied object. + + :param str event: (optional) The type of turn event. + :param List[RuntimeIntent] intents_denied: (optional) An array of denied + intents. + """ + # pylint: disable=super-init-not-called + self.event = event + self.intents_denied = intents_denied + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied': + """Initialize a MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (intents_denied := _dict.get('intents_denied')) is not None: + args['intents_denied'] = [ + RuntimeIntent.from_dict(v) for v in intents_denied + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'intents_denied') and self.intents_denied is not None: + intents_denied_list = [] + for v in self.intents_denied: + if isinstance(v, dict): + intents_denied_list.append(v) + else: + intents_denied_list.append(v.to_dict()) + _dict['intents_denied'] = intents_denied_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, + other: 'MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class 
MessageOutputDebugTurnEventTurnEventTopicSwitchDenied( + MessageOutputDebugTurnEvent): + """ + MessageOutputDebugTurnEventTurnEventTopicSwitchDenied. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that is + defined for the action. + :param str reason: (optional) The reason the action was visited. + """ + + def __init__( + self, + *, + event: Optional[str] = None, + source: Optional['TurnEventActionSource'] = None, + condition_type: Optional[str] = None, + reason: Optional[str] = None, + ) -> None: + """ + Initialize a MessageOutputDebugTurnEventTurnEventTopicSwitchDenied object. + + :param str event: (optional) The type of turn event. + :param TurnEventActionSource source: (optional) + :param str condition_type: (optional) The type of condition (if any) that + is defined for the action. + :param str reason: (optional) The reason the action was visited. + """ + # pylint: disable=super-init-not-called + self.event = event + self.source = source + self.condition_type = condition_type + self.reason = reason + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'MessageOutputDebugTurnEventTurnEventTopicSwitchDenied': + """Initialize a MessageOutputDebugTurnEventTurnEventTopicSwitchDenied object from a json dictionary.""" + args = {} + if (event := _dict.get('event')) is not None: + args['event'] = event + if (source := _dict.get('source')) is not None: + args['source'] = TurnEventActionSource.from_dict(source) + if (condition_type := _dict.get('condition_type')) is not None: + args['condition_type'] = condition_type + if (reason := _dict.get('reason')) is not None: + args['reason'] = reason + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageOutputDebugTurnEventTurnEventTopicSwitchDenied object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'event') and self.event is not None: + _dict['event'] = self.event + if hasattr(self, 'source') and self.source is not None: + if isinstance(self.source, dict): + _dict['source'] = self.source + else: + _dict['source'] = self.source.to_dict() + if hasattr(self, 'condition_type') and self.condition_type is not None: + _dict['condition_type'] = self.condition_type + if hasattr(self, 'reason') and self.reason is not None: + _dict['reason'] = self.reason + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageOutputDebugTurnEventTurnEventTopicSwitchDenied object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'MessageOutputDebugTurnEventTurnEventTopicSwitchDenied' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: 'MessageOutputDebugTurnEventTurnEventTopicSwitchDenied' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ConditionTypeEnum(str, Enum): + """ + The type of condition (if any) that is defined for the action. 
+ """ + + USER_DEFINED = 'user_defined' + WELCOME = 'welcome' + ANYTHING_ELSE = 'anything_else' + + class ReasonEnum(str, Enum): + """ + The reason the action was visited. + """ + + ACTION_CONDITIONS_FAILED = 'action_conditions_failed' + + +class MessageStreamResponseMessageStreamCompleteItem(MessageStreamResponse): + """ + A completed response item. A complete item is a composition of every streamed partial + item with the same streaming_metadata.id, and each complete item contains its own + unique streaming_metadata.id. + + :param CompleteItem complete_item: (optional) + """ + + def __init__( + self, + *, + complete_item: Optional['CompleteItem'] = None, + ) -> None: + """ + Initialize a MessageStreamResponseMessageStreamCompleteItem object. + + :param CompleteItem complete_item: (optional) + """ + # pylint: disable=super-init-not-called + self.complete_item = complete_item + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'MessageStreamResponseMessageStreamCompleteItem': + """Initialize a MessageStreamResponseMessageStreamCompleteItem object from a json dictionary.""" + args = {} + if (complete_item := _dict.get('complete_item')) is not None: + args['complete_item'] = CompleteItem.from_dict(complete_item) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageStreamResponseMessageStreamCompleteItem object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'complete_item') and self.complete_item is not None: + if isinstance(self.complete_item, dict): + _dict['complete_item'] = self.complete_item + else: + _dict['complete_item'] = self.complete_item.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageStreamResponseMessageStreamCompleteItem object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'MessageStreamResponseMessageStreamCompleteItem') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'MessageStreamResponseMessageStreamCompleteItem') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageStreamResponseMessageStreamPartialItem(MessageStreamResponse): + """ + A chunk of the streamed message response. + + :param PartialItem partial_item: (optional) Message response partial item + content. + """ + + def __init__( + self, + *, + partial_item: Optional['PartialItem'] = None, + ) -> None: + """ + Initialize a MessageStreamResponseMessageStreamPartialItem object. + + :param PartialItem partial_item: (optional) Message response partial item + content. 
+ """ + # pylint: disable=super-init-not-called + self.partial_item = partial_item + + @classmethod + def from_dict( + cls, + _dict: Dict) -> 'MessageStreamResponseMessageStreamPartialItem': + """Initialize a MessageStreamResponseMessageStreamPartialItem object from a json dictionary.""" + args = {} + if (partial_item := _dict.get('partial_item')) is not None: + args['partial_item'] = PartialItem.from_dict(partial_item) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageStreamResponseMessageStreamPartialItem object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'partial_item') and self.partial_item is not None: + if isinstance(self.partial_item, dict): + _dict['partial_item'] = self.partial_item + else: + _dict['partial_item'] = self.partial_item.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageStreamResponseMessageStreamPartialItem object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'MessageStreamResponseMessageStreamPartialItem') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'MessageStreamResponseMessageStreamPartialItem') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class MessageStreamResponseStatefulMessageStreamFinalResponse( + MessageStreamResponse): + """ + The final and stateful message response. + + :param FinalResponse final_response: (optional) Message final response content. + """ + + def __init__( + self, + *, + final_response: Optional['FinalResponse'] = None, + ) -> None: + """ + Initialize a MessageStreamResponseStatefulMessageStreamFinalResponse object. + + :param FinalResponse final_response: (optional) Message final response + content. 
+ """ + # pylint: disable=super-init-not-called + self.final_response = final_response + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'MessageStreamResponseStatefulMessageStreamFinalResponse': + """Initialize a MessageStreamResponseStatefulMessageStreamFinalResponse object from a json dictionary.""" + args = {} + if (final_response := _dict.get('final_response')) is not None: + args['final_response'] = FinalResponse.from_dict(final_response) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a MessageStreamResponseStatefulMessageStreamFinalResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'final_response') and self.final_response is not None: + if isinstance(self.final_response, dict): + _dict['final_response'] = self.final_response + else: + _dict['final_response'] = self.final_response.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this MessageStreamResponseStatefulMessageStreamFinalResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'MessageStreamResponseStatefulMessageStreamFinalResponse' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: 'MessageStreamResponseStatefulMessageStreamFinalResponse' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode( + ProviderAuthenticationOAuth2Flows): + """ + Non-private authentication settings for authorization-code flow. + + :param str token_url: (optional) The token URL. + :param str refresh_url: (optional) The refresh token URL. + :param str client_auth_type: (optional) The client authorization type. + :param str content_type: (optional) The content type. + :param str header_prefix: (optional) The prefix fo the header. + :param str authorization_url: (optional) The authorization URL. + :param str redirect_uri: (optional) The redirect URI. + """ + + def __init__( + self, + *, + token_url: Optional[str] = None, + refresh_url: Optional[str] = None, + client_auth_type: Optional[str] = None, + content_type: Optional[str] = None, + header_prefix: Optional[str] = None, + authorization_url: Optional[str] = None, + redirect_uri: Optional[str] = None, + ) -> None: + """ + Initialize a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode object. + + :param str token_url: (optional) The token URL. + :param str refresh_url: (optional) The refresh token URL. + :param str client_auth_type: (optional) The client authorization type. + :param str content_type: (optional) The content type. + :param str header_prefix: (optional) The prefix fo the header. + :param str authorization_url: (optional) The authorization URL. + :param str redirect_uri: (optional) The redirect URI. 
+ """ + # pylint: disable=super-init-not-called + self.token_url = token_url + self.refresh_url = refresh_url + self.client_auth_type = client_auth_type + self.content_type = content_type + self.header_prefix = header_prefix + self.authorization_url = authorization_url + self.redirect_uri = redirect_uri + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode': + """Initialize a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode object from a json dictionary.""" + args = {} + if (token_url := _dict.get('token_url')) is not None: + args['token_url'] = token_url + if (refresh_url := _dict.get('refresh_url')) is not None: + args['refresh_url'] = refresh_url + if (client_auth_type := _dict.get('client_auth_type')) is not None: + args['client_auth_type'] = client_auth_type + if (content_type := _dict.get('content_type')) is not None: + args['content_type'] = content_type + if (header_prefix := _dict.get('header_prefix')) is not None: + args['header_prefix'] = header_prefix + if (authorization_url := _dict.get('authorization_url')) is not None: + args['authorization_url'] = authorization_url + if (redirect_uri := _dict.get('redirect_uri')) is not None: + args['redirect_uri'] = redirect_uri + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'token_url') and self.token_url is not None: + _dict['token_url'] = self.token_url + if hasattr(self, 'refresh_url') and self.refresh_url is not None: + _dict['refresh_url'] = self.refresh_url + if hasattr(self, + 'client_auth_type') and self.client_auth_type is not None: + _dict['client_auth_type'] = self.client_auth_type + if hasattr(self, 'content_type') and self.content_type is not None: + _dict['content_type'] = self.content_type + if hasattr(self, 'header_prefix') and self.header_prefix is not None: + _dict['header_prefix'] = self.header_prefix + if hasattr(self, + 'authorization_url') and self.authorization_url is not None: + _dict['authorization_url'] = self.authorization_url + if hasattr(self, 'redirect_uri') and self.redirect_uri is not None: + _dict['redirect_uri'] = self.redirect_uri + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: + 'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: + 'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ClientAuthTypeEnum(str, Enum): + """ + The client authorization type. 
+ """ + + BODY = 'Body' + BASICAUTHHEADER = 'BasicAuthHeader' + + +class ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials( + ProviderAuthenticationOAuth2Flows): + """ + ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials. + + :param str token_url: (optional) The token URL. + :param str refresh_url: (optional) The refresh token URL. + :param str client_auth_type: (optional) The client authorization type. + :param str content_type: (optional) The content type. + :param str header_prefix: (optional) The prefix fo the header. + """ + + def __init__( + self, + *, + token_url: Optional[str] = None, + refresh_url: Optional[str] = None, + client_auth_type: Optional[str] = None, + content_type: Optional[str] = None, + header_prefix: Optional[str] = None, + ) -> None: + """ + Initialize a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials object. + + :param str token_url: (optional) The token URL. + :param str refresh_url: (optional) The refresh token URL. + :param str client_auth_type: (optional) The client authorization type. + :param str content_type: (optional) The content type. + :param str header_prefix: (optional) The prefix fo the header. + """ + # pylint: disable=super-init-not-called + self.token_url = token_url + self.refresh_url = refresh_url + self.client_auth_type = client_auth_type + self.content_type = content_type + self.header_prefix = header_prefix + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials': + """Initialize a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials object from a json dictionary.""" + args = {} + if (token_url := _dict.get('token_url')) is not None: + args['token_url'] = token_url + if (refresh_url := _dict.get('refresh_url')) is not None: + args['refresh_url'] = refresh_url + if (client_auth_type := _dict.get('client_auth_type')) is not None: + args['client_auth_type'] = client_auth_type + if (content_type := _dict.get('content_type')) is not None: + args['content_type'] = content_type + if (header_prefix := _dict.get('header_prefix')) is not None: + args['header_prefix'] = header_prefix + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'token_url') and self.token_url is not None: + _dict['token_url'] = self.token_url + if hasattr(self, 'refresh_url') and self.refresh_url is not None: + _dict['refresh_url'] = self.refresh_url + if hasattr(self, + 'client_auth_type') and self.client_auth_type is not None: + _dict['client_auth_type'] = self.client_auth_type + if hasattr(self, 'content_type') and self.content_type is not None: + _dict['content_type'] = self.content_type + if hasattr(self, 'header_prefix') and self.header_prefix is not None: + _dict['header_prefix'] = self.header_prefix + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: + 
'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials'
+    ) -> bool:
+        """Return `true` when self and other are equal, false otherwise."""
+        if not isinstance(other, self.__class__):
+            return False
+        return self.__dict__ == other.__dict__
+
+    def __ne__(
+        self, other:
+        'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials'
+    ) -> bool:
+        """Return `true` when self and other are not equal, false otherwise."""
+        return not self == other
+
+    class ClientAuthTypeEnum(str, Enum):
+        """
+        The client authorization type.
+        """
+
+        BODY = 'Body'
+        BASICAUTHHEADER = 'BasicAuthHeader'
+
+
+class ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password(
+        ProviderAuthenticationOAuth2Flows):
+    """
+    Non-private authentication settings for resource owner password flow.
+
+    :param str token_url: (optional) The token URL.
+    :param str refresh_url: (optional) The refresh token URL.
+    :param str client_auth_type: (optional) The client authorization type.
+    :param str content_type: (optional) The content type.
+    :param str header_prefix: (optional) The prefix of the header.
+    :param ProviderAuthenticationOAuth2PasswordUsername username: (optional) The
+          username for oauth2 authentication when the preferred flow is "password".
+    """
+
+    def __init__(
+        self,
+        *,
+        token_url: Optional[str] = None,
+        refresh_url: Optional[str] = None,
+        client_auth_type: Optional[str] = None,
+        content_type: Optional[str] = None,
+        header_prefix: Optional[str] = None,
+        username: Optional[
+            'ProviderAuthenticationOAuth2PasswordUsername'] = None,
+    ) -> None:
+        """
+        Initialize a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password object.
+
+        :param str token_url: (optional) The token URL.
+        :param str refresh_url: (optional) The refresh token URL.
+        :param str client_auth_type: (optional) The client authorization type.
+        :param str content_type: (optional) The content type.
+        :param str header_prefix: (optional) The prefix of the header.
+        :param ProviderAuthenticationOAuth2PasswordUsername username: (optional)
+          The username for oauth2 authentication when the preferred flow is
+          "password".
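+
+        Example (editor's illustrative sketch; the URL is a placeholder and the
+        dict keys stand in for the ProviderAuthenticationOAuth2PasswordUsername
+        fields):
+
+            flow = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password(
+                token_url='https://example.com/oauth/token',
+                client_auth_type='BasicAuthHeader',
+                username={'value': 'api_user'},
+            )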
+ """ + # pylint: disable=super-init-not-called + self.token_url = token_url + self.refresh_url = refresh_url + self.client_auth_type = client_auth_type + self.content_type = content_type + self.header_prefix = header_prefix + self.username = username + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password': + """Initialize a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password object from a json dictionary.""" + args = {} + if (token_url := _dict.get('token_url')) is not None: + args['token_url'] = token_url + if (refresh_url := _dict.get('refresh_url')) is not None: + args['refresh_url'] = refresh_url + if (client_auth_type := _dict.get('client_auth_type')) is not None: + args['client_auth_type'] = client_auth_type + if (content_type := _dict.get('content_type')) is not None: + args['content_type'] = content_type + if (header_prefix := _dict.get('header_prefix')) is not None: + args['header_prefix'] = header_prefix + if (username := _dict.get('username')) is not None: + args[ + 'username'] = ProviderAuthenticationOAuth2PasswordUsername.from_dict( + username) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'token_url') and self.token_url is not None: + _dict['token_url'] = self.token_url + if hasattr(self, 'refresh_url') and self.refresh_url is not None: + _dict['refresh_url'] = self.refresh_url + if hasattr(self, + 'client_auth_type') and self.client_auth_type is not None: + _dict['client_auth_type'] = self.client_auth_type + if hasattr(self, 'content_type') and self.content_type is not None: + _dict['content_type'] = self.content_type + if hasattr(self, 'header_prefix') and self.header_prefix is not None: + _dict['header_prefix'] = self.header_prefix + if hasattr(self, 'username') and self.username is not None: + if isinstance(self.username, dict): + _dict['username'] = self.username + else: + _dict['username'] = self.username.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: + 'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: + 'ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ClientAuthTypeEnum(str, Enum): + """ + The client authorization type. + """ + + BODY = 'Body' + BASICAUTHHEADER = 'BasicAuthHeader' + + +class ProviderPrivateAuthenticationBasicFlow(ProviderPrivateAuthentication): + """ + The private data for basic authentication. + + :param ProviderAuthenticationTypeAndValue password: (optional) The password for + bearer authentication. 
+ """ + + def __init__( + self, + *, + password: Optional['ProviderAuthenticationTypeAndValue'] = None, + ) -> None: + """ + Initialize a ProviderPrivateAuthenticationBasicFlow object. + + :param ProviderAuthenticationTypeAndValue password: (optional) The password + for bearer authentication. + """ + # pylint: disable=super-init-not-called + self.password = password + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProviderPrivateAuthenticationBasicFlow': + """Initialize a ProviderPrivateAuthenticationBasicFlow object from a json dictionary.""" + args = {} + if (password := _dict.get('password')) is not None: + args['password'] = ProviderAuthenticationTypeAndValue.from_dict( + password) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderPrivateAuthenticationBasicFlow object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'password') and self.password is not None: + if isinstance(self.password, dict): + _dict['password'] = self.password + else: + _dict['password'] = self.password.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderPrivateAuthenticationBasicFlow object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderPrivateAuthenticationBasicFlow') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderPrivateAuthenticationBasicFlow') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderPrivateAuthenticationBearerFlow(ProviderPrivateAuthentication): + """ + The private data for bearer authentication. + + :param ProviderAuthenticationTypeAndValue token: (optional) The token for bearer + authentication. + """ + + def __init__( + self, + *, + token: Optional['ProviderAuthenticationTypeAndValue'] = None, + ) -> None: + """ + Initialize a ProviderPrivateAuthenticationBearerFlow object. + + :param ProviderAuthenticationTypeAndValue token: (optional) The token for + bearer authentication. 
+ """ + # pylint: disable=super-init-not-called + self.token = token + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'ProviderPrivateAuthenticationBearerFlow': + """Initialize a ProviderPrivateAuthenticationBearerFlow object from a json dictionary.""" + args = {} + if (token := _dict.get('token')) is not None: + args['token'] = ProviderAuthenticationTypeAndValue.from_dict(token) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderPrivateAuthenticationBearerFlow object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'token') and self.token is not None: + if isinstance(self.token, dict): + _dict['token'] = self.token + else: + _dict['token'] = self.token.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderPrivateAuthenticationBearerFlow object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderPrivateAuthenticationBearerFlow') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderPrivateAuthenticationBearerFlow') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderPrivateAuthenticationOAuth2Flow(ProviderPrivateAuthentication): + """ + The private data for oauth2 authentication. + + :param ProviderPrivateAuthenticationOAuth2FlowFlows flows: (optional) Scenarios + performed by the API client to fetch an access token from the authorization + server. + """ + + def __init__( + self, + *, + flows: Optional['ProviderPrivateAuthenticationOAuth2FlowFlows'] = None, + ) -> None: + """ + Initialize a ProviderPrivateAuthenticationOAuth2Flow object. + + :param ProviderPrivateAuthenticationOAuth2FlowFlows flows: (optional) + Scenarios performed by the API client to fetch an access token from the + authorization server. 
+ """ + # pylint: disable=super-init-not-called + self.flows = flows + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'ProviderPrivateAuthenticationOAuth2Flow': + """Initialize a ProviderPrivateAuthenticationOAuth2Flow object from a json dictionary.""" + args = {} + if (flows := _dict.get('flows')) is not None: + args['flows'] = flows + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderPrivateAuthenticationOAuth2Flow object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'flows') and self.flows is not None: + if isinstance(self.flows, dict): + _dict['flows'] = self.flows + else: + _dict['flows'] = self.flows.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderPrivateAuthenticationOAuth2Flow object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProviderPrivateAuthenticationOAuth2Flow') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProviderPrivateAuthenticationOAuth2Flow') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode( + ProviderPrivateAuthenticationOAuth2FlowFlows): + """ + Private authentication settings for client credentials flow. + + :param str client_id: (optional) The client ID. + :param str client_secret: (optional) The client secret. + :param str access_token: (optional) The access token. + :param str refresh_token: (optional) The refresh token. + :param str authorization_code: (optional) The authorization code. + """ + + def __init__( + self, + *, + client_id: Optional[str] = None, + client_secret: Optional[str] = None, + access_token: Optional[str] = None, + refresh_token: Optional[str] = None, + authorization_code: Optional[str] = None, + ) -> None: + """ + Initialize a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode object. + + :param str client_id: (optional) The client ID. + :param str client_secret: (optional) The client secret. + :param str access_token: (optional) The access token. + :param str refresh_token: (optional) The refresh token. + :param str authorization_code: (optional) The authorization code. 
+ """ + # pylint: disable=super-init-not-called + self.client_id = client_id + self.client_secret = client_secret + self.access_token = access_token + self.refresh_token = refresh_token + self.authorization_code = authorization_code + + @classmethod + def from_dict( + cls, _dict: Dict + ) -> 'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode': + """Initialize a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode object from a json dictionary.""" + args = {} + if (client_id := _dict.get('client_id')) is not None: + args['client_id'] = client_id + if (client_secret := _dict.get('client_secret')) is not None: + args['client_secret'] = client_secret + if (access_token := _dict.get('access_token')) is not None: + args['access_token'] = access_token + if (refresh_token := _dict.get('refresh_token')) is not None: + args['refresh_token'] = refresh_token + if (authorization_code := _dict.get('authorization_code')) is not None: + args['authorization_code'] = authorization_code + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'client_id') and self.client_id is not None: + _dict['client_id'] = self.client_id + if hasattr(self, 'client_secret') and self.client_secret is not None: + _dict['client_secret'] = self.client_secret + if hasattr(self, 'access_token') and self.access_token is not None: + _dict['access_token'] = self.access_token + if hasattr(self, 'refresh_token') and self.refresh_token is not None: + _dict['refresh_token'] = self.refresh_token + if hasattr( + self, + 'authorization_code') and self.authorization_code is not None: + _dict['authorization_code'] = self.authorization_code + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: + 'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode' + ) -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__( + self, other: + 'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode' + ) -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other -class CaptureGroup(object): +class ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials( + ProviderPrivateAuthenticationOAuth2FlowFlows): """ - CaptureGroup. + ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials. - :attr str group: A recognized capture group for the entity. - :attr list[int] location: (optional) Zero-based character offsets that indicate where - the entity value begins and ends in the input text. + :param str client_id: (optional) The client ID. + :param str client_secret: (optional) The client secret. 
+ :param str access_token: (optional) The access token. + :param str refresh_token: (optional) The refresh token. """ - def __init__(self, group, location=None): + def __init__( + self, + *, + client_id: Optional[str] = None, + client_secret: Optional[str] = None, + access_token: Optional[str] = None, + refresh_token: Optional[str] = None, + ) -> None: """ - Initialize a CaptureGroup object. + Initialize a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials object. - :param str group: A recognized capture group for the entity. - :param list[int] location: (optional) Zero-based character offsets that indicate - where the entity value begins and ends in the input text. + :param str client_id: (optional) The client ID. + :param str client_secret: (optional) The client secret. + :param str access_token: (optional) The access token. + :param str refresh_token: (optional) The refresh token. """ - self.group = group - self.location = location + # pylint: disable=super-init-not-called + self.client_id = client_id + self.client_secret = client_secret + self.access_token = access_token + self.refresh_token = refresh_token @classmethod - def _from_dict(cls, _dict): - """Initialize a CaptureGroup object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials': + """Initialize a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials object from a json dictionary.""" args = {} - if 'group' in _dict: - args['group'] = _dict.get('group') - else: - raise ValueError( - 'Required property \'group\' not present in CaptureGroup JSON') - if 'location' in _dict: - args['location'] = _dict.get('location') + if (client_id := _dict.get('client_id')) is not None: + args['client_id'] = client_id + if (client_secret := _dict.get('client_secret')) is not None: + args['client_secret'] = client_secret + if (access_token := _dict.get('access_token')) is not None: + args['access_token'] = access_token + if (refresh_token := _dict.get('refresh_token')) is not None: + args['refresh_token'] = refresh_token return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'group') and self.group is not None: - _dict['group'] = self.group - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location + if hasattr(self, 'client_id') and self.client_id is not None: + _dict['client_id'] = self.client_id + if hasattr(self, 'client_secret') and self.client_secret is not None: + _dict['client_secret'] = self.client_secret + if hasattr(self, 'access_token') and self.access_token is not None: + _dict['access_token'] = self.access_token + if hasattr(self, 'refresh_token') and self.refresh_token is not None: + _dict['refresh_token'] = self.refresh_token return _dict - def __str__(self): - """Return a `str` version of this CaptureGroup object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this 
ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: + 'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: + 'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogLogMessage(object): +class ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password( + ProviderPrivateAuthenticationOAuth2FlowFlows): """ - Dialog log message details. + Private authentication settings for resource owner password flow. - :attr str level: The severity of the log message. - :attr str message: The text of the log message. + :param str client_id: (optional) The client ID. + :param str client_secret: (optional) The client secret. + :param str access_token: (optional) The access token. + :param str refresh_token: (optional) The refresh token. + :param ProviderPrivateAuthenticationOAuth2PasswordPassword password: (optional) + The password for oauth2 authentication when the preferred flow is "password". """ - def __init__(self, level, message): + def __init__( + self, + *, + client_id: Optional[str] = None, + client_secret: Optional[str] = None, + access_token: Optional[str] = None, + refresh_token: Optional[str] = None, + password: Optional[ + 'ProviderPrivateAuthenticationOAuth2PasswordPassword'] = None, + ) -> None: """ - Initialize a DialogLogMessage object. + Initialize a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password object. - :param str level: The severity of the log message. - :param str message: The text of the log message. + :param str client_id: (optional) The client ID. + :param str client_secret: (optional) The client secret. + :param str access_token: (optional) The access token. + :param str refresh_token: (optional) The refresh token. + :param ProviderPrivateAuthenticationOAuth2PasswordPassword password: + (optional) The password for oauth2 authentication when the preferred flow + is "password". 
""" - self.level = level - self.message = message + # pylint: disable=super-init-not-called + self.client_id = client_id + self.client_secret = client_secret + self.access_token = access_token + self.refresh_token = refresh_token + self.password = password @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogLogMessage object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password': + """Initialize a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password object from a json dictionary.""" args = {} - if 'level' in _dict: - args['level'] = _dict.get('level') - else: - raise ValueError( - 'Required property \'level\' not present in DialogLogMessage JSON' - ) - if 'message' in _dict: - args['message'] = _dict.get('message') - else: - raise ValueError( - 'Required property \'message\' not present in DialogLogMessage JSON' - ) + if (client_id := _dict.get('client_id')) is not None: + args['client_id'] = client_id + if (client_secret := _dict.get('client_secret')) is not None: + args['client_secret'] = client_secret + if (access_token := _dict.get('access_token')) is not None: + args['access_token'] = access_token + if (refresh_token := _dict.get('refresh_token')) is not None: + args['refresh_token'] = refresh_token + if (password := _dict.get('password')) is not None: + args[ + 'password'] = ProviderPrivateAuthenticationOAuth2PasswordPassword.from_dict( + password) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'level') and self.level is not None: - _dict['level'] = self.level - if hasattr(self, 'message') and self.message is not None: - _dict['message'] = self.message + if hasattr(self, 'client_id') and self.client_id is not None: + _dict['client_id'] = self.client_id + if hasattr(self, 'client_secret') and self.client_secret is not None: + _dict['client_secret'] = self.client_secret + if hasattr(self, 'access_token') and self.access_token is not None: + _dict['access_token'] = self.access_token + if hasattr(self, 'refresh_token') and self.refresh_token is not None: + _dict['refresh_token'] = self.refresh_token + if hasattr(self, 'password') and self.password is not None: + if isinstance(self.password, dict): + _dict['password'] = self.password + else: + _dict['password'] = self.password.to_dict() return _dict - def __str__(self): - """Return a `str` version of this DialogLogMessage object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: + 'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: + 
'ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodeAction(object): +class RuntimeResponseGenericRuntimeResponseTypeAudio(RuntimeResponseGeneric): """ - DialogNodeAction. + RuntimeResponseGenericRuntimeResponseTypeAudio. - :attr str name: The name of the action. - :attr str action_type: (optional) The type of action to invoke. - :attr dict parameters: (optional) A map of key/value pairs to be provided to the - action. - :attr str result_variable: The location in the dialog context where the result of the - action is stored. - :attr str credentials: (optional) The name of the context variable that the client - application will use to pass in credentials for the action. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the audio clip. + :param str title: (optional) The title or introductory text to show before the + response. + :param str description: (optional) The description to show with the the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. + :param dict channel_options: (optional) For internal use only. + :param str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the audio player cannot be seen. """ - def __init__(self, - name, - result_variable, - action_type=None, - parameters=None, - credentials=None): + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + channel_options: Optional[dict] = None, + alt_text: Optional[str] = None, + ) -> None: """ - Initialize a DialogNodeAction object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeAudio object. - :param str name: The name of the action. - :param str result_variable: The location in the dialog context where the result of - the action is stored. - :param str action_type: (optional) The type of action to invoke. - :param dict parameters: (optional) A map of key/value pairs to be provided to the - action. - :param str credentials: (optional) The name of the context variable that the - client application will use to pass in credentials for the action. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the audio clip. + :param str title: (optional) The title or introductory text to show before + the response. + :param str description: (optional) The description to show with the the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + :param dict channel_options: (optional) For internal use only. 
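# A hedged parsing sketch for the `audio` generic response defined here. The module path
# ibm_watson.assistant_v2 and the payload values are assumptions; only the property names
# come from this class.
from ibm_watson.assistant_v2 import RuntimeResponseGenericRuntimeResponseTypeAudio

payload = {
    'response_type': 'audio',
    'source': 'https://example.com/clips/greeting.mp3',
    'title': 'Greeting',
    'alt_text': 'A short spoken greeting',
}
audio = RuntimeResponseGenericRuntimeResponseTypeAudio.from_dict(payload)
assert audio.source == 'https://example.com/clips/greeting.mp3'

# `response_type` and `source` are required: from_dict() raises ValueError if either is missing.
try:
    RuntimeResponseGenericRuntimeResponseTypeAudio.from_dict({'response_type': 'audio'})
except ValueError as err:
    print(err)  # Required property 'source' not present ...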
+ :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the audio player cannot be seen. """ - self.name = name - self.action_type = action_type - self.parameters = parameters - self.result_variable = result_variable - self.credentials = credentials + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.channels = channels + self.channel_options = channel_options + self.alt_text = alt_text @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeAction object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeAudio': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeAudio object from a json dictionary.""" args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'name\' not present in DialogNodeAction JSON' + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeAudio JSON' ) - if 'type' in _dict or 'action_type' in _dict: - args['action_type'] = _dict.get('type') or _dict.get('action_type') - if 'parameters' in _dict: - args['parameters'] = _dict.get('parameters') - if 'result_variable' in _dict: - args['result_variable'] = _dict.get('result_variable') + if (source := _dict.get('source')) is not None: + args['source'] = source else: raise ValueError( - 'Required property \'result_variable\' not present in DialogNodeAction JSON' + 'Required property \'source\' not present in RuntimeResponseGenericRuntimeResponseTypeAudio JSON' ) - if 'credentials' in _dict: - args['credentials'] = _dict.get('credentials') + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] + if (channel_options := _dict.get('channel_options')) is not None: + args['channel_options'] = channel_options + if (alt_text := _dict.get('alt_text')) is not None: + args['alt_text'] = alt_text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeAudio object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'action_type') and self.action_type is not None: - _dict['type'] = self.action_type - if hasattr(self, 'parameters') and self.parameters is not None: - _dict['parameters'] = self.parameters + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: 
+ channels_list.append(v.to_dict()) + _dict['channels'] = channels_list if hasattr(self, - 'result_variable') and self.result_variable is not None: - _dict['result_variable'] = self.result_variable - if hasattr(self, 'credentials') and self.credentials is not None: - _dict['credentials'] = self.credentials + 'channel_options') and self.channel_options is not None: + _dict['channel_options'] = self.channel_options + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict - def __str__(self): - """Return a `str` version of this DialogNodeAction object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeAudio object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeAudio') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeAudio') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodeOutputOptionsElement(object): +class RuntimeResponseGenericRuntimeResponseTypeChannelTransfer( + RuntimeResponseGeneric): """ - DialogNodeOutputOptionsElement. + RuntimeResponseGenericRuntimeResponseTypeChannelTransfer. - :attr str label: The user-facing label for the option. - :attr DialogNodeOutputOptionsElementValue value: An object defining the message input - to be sent to the assistant if the user selects the corresponding option. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + **Note:** The `channel_transfer` response type is not supported on IBM Cloud + Pak for Data. + :param str message_to_user: The message to display to the user when initiating a + channel transfer. + :param ChannelTransferInfo transfer_info: Information used by an integration to + transfer the conversation to a different channel. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, label, value): + def __init__( + self, + response_type: str, + message_to_user: str, + transfer_info: 'ChannelTransferInfo', + *, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: """ - Initialize a DialogNodeOutputOptionsElement object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeChannelTransfer object. - :param str label: The user-facing label for the option. - :param DialogNodeOutputOptionsElementValue value: An object defining the message - input to be sent to the assistant if the user selects the corresponding option. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + **Note:** The `channel_transfer` response type is not supported on IBM + Cloud Pak for Data. 
+ :param str message_to_user: The message to display to the user when + initiating a channel transfer. + :param ChannelTransferInfo transfer_info: Information used by an + integration to transfer the conversation to a different channel. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. """ - self.label = label - self.value = value + # pylint: disable=super-init-not-called + self.response_type = response_type + self.message_to_user = message_to_user + self.transfer_info = transfer_info + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeOutputOptionsElement object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeChannelTransfer object from a json dictionary.""" args = {} - if 'label' in _dict: - args['label'] = _dict.get('label') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'label\' not present in DialogNodeOutputOptionsElement JSON' + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeChannelTransfer JSON' ) - if 'value' in _dict: - args['value'] = DialogNodeOutputOptionsElementValue._from_dict( - _dict.get('value')) + if (message_to_user := _dict.get('message_to_user')) is not None: + args['message_to_user'] = message_to_user else: raise ValueError( - 'Required property \'value\' not present in DialogNodeOutputOptionsElement JSON' + 'Required property \'message_to_user\' not present in RuntimeResponseGenericRuntimeResponseTypeChannelTransfer JSON' + ) + if (transfer_info := _dict.get('transfer_info')) is not None: + args['transfer_info'] = ChannelTransferInfo.from_dict(transfer_info) + else: + raise ValueError( + 'Required property \'transfer_info\' not present in RuntimeResponseGenericRuntimeResponseTypeChannelTransfer JSON' ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeChannelTransfer object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'label') and self.label is not None: - _dict['label'] = self.label - if hasattr(self, 'value') and self.value is not None: - _dict['value'] = self.value._to_dict() + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, + 'message_to_user') and self.message_to_user is not None: + _dict['message_to_user'] = self.message_to_user + if hasattr(self, 'transfer_info') and self.transfer_info is not None: + if isinstance(self.transfer_info, dict): + _dict['transfer_info'] = self.transfer_info + else: + _dict['transfer_info'] = self.transfer_info.to_dict() + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = 
channels_list return _dict - def __str__(self): - """Return a `str` version of this DialogNodeOutputOptionsElement object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeChannelTransfer object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeChannelTransfer' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodeOutputOptionsElementValue(object): +class RuntimeResponseGenericRuntimeResponseTypeConnectToAgent( + RuntimeResponseGeneric): """ - An object defining the message input to be sent to the assistant if the user selects - the corresponding option. + RuntimeResponseGenericRuntimeResponseTypeConnectToAgent. - :attr MessageInput input: (optional) An input object that includes the input text. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str message_to_human_agent: (optional) A message to be sent to the human + agent who will be taking over the conversation. + :param AgentAvailabilityMessage agent_available: (optional) An optional message + to be displayed to the user to indicate that the conversation will be + transferred to the next available agent. + :param AgentAvailabilityMessage agent_unavailable: (optional) An optional + message to be displayed to the user to indicate that no online agent is + available to take over the conversation. + :param DialogNodeOutputConnectToAgentTransferInfo transfer_info: (optional) + Routing or other contextual information to be used by target service desk + systems. + :param str topic: (optional) A label identifying the topic of the conversation, + derived from the **title** property of the relevant node or the **topic** + property of the dialog node response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, input=None): + def __init__( + self, + response_type: str, + *, + message_to_human_agent: Optional[str] = None, + agent_available: Optional['AgentAvailabilityMessage'] = None, + agent_unavailable: Optional['AgentAvailabilityMessage'] = None, + transfer_info: Optional[ + 'DialogNodeOutputConnectToAgentTransferInfo'] = None, + topic: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: """ - Initialize a DialogNodeOutputOptionsElementValue object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeConnectToAgent object. - :param MessageInput input: (optional) An input object that includes the input - text. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. 
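# A hedged dispatch sketch: once generic response items have been deserialized with the
# from_dict() helpers in this file, a client can branch on the concrete subclass. The
# helper name and the ibm_watson.assistant_v2 import path are illustrative; the attribute
# names come from the classes defined here.
from ibm_watson.assistant_v2 import (
    RuntimeResponseGenericRuntimeResponseTypeChannelTransfer,
    RuntimeResponseGenericRuntimeResponseTypeConnectToAgent,
)

def summarize_generic(item) -> str:
    """Return a one-line summary of a deserialized generic response item."""
    if isinstance(item, RuntimeResponseGenericRuntimeResponseTypeConnectToAgent):
        return f"connect_to_agent: {item.message_to_human_agent or '(no note for the agent)'}"
    if isinstance(item, RuntimeResponseGenericRuntimeResponseTypeChannelTransfer):
        return f"channel_transfer: {item.message_to_user}"
    return f"unhandled response_type: {item.response_type}"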
+ :param str message_to_human_agent: (optional) A message to be sent to the + human agent who will be taking over the conversation. + :param AgentAvailabilityMessage agent_available: (optional) An optional + message to be displayed to the user to indicate that the conversation will + be transferred to the next available agent. + :param AgentAvailabilityMessage agent_unavailable: (optional) An optional + message to be displayed to the user to indicate that no online agent is + available to take over the conversation. + :param DialogNodeOutputConnectToAgentTransferInfo transfer_info: (optional) + Routing or other contextual information to be used by target service desk + systems. + :param str topic: (optional) A label identifying the topic of the + conversation, derived from the **title** property of the relevant node or + the **topic** property of the dialog node response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. """ - self.input = input + # pylint: disable=super-init-not-called + self.response_type = response_type + self.message_to_human_agent = message_to_human_agent + self.agent_available = agent_available + self.agent_unavailable = agent_unavailable + self.transfer_info = transfer_info + self.topic = topic + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodeOutputOptionsElementValue object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeConnectToAgent object from a json dictionary.""" args = {} - if 'input' in _dict: - args['input'] = MessageInput._from_dict(_dict.get('input')) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeConnectToAgent JSON' + ) + if (message_to_human_agent := + _dict.get('message_to_human_agent')) is not None: + args['message_to_human_agent'] = message_to_human_agent + if (agent_available := _dict.get('agent_available')) is not None: + args['agent_available'] = AgentAvailabilityMessage.from_dict( + agent_available) + if (agent_unavailable := _dict.get('agent_unavailable')) is not None: + args['agent_unavailable'] = AgentAvailabilityMessage.from_dict( + agent_unavailable) + if (transfer_info := _dict.get('transfer_info')) is not None: + args[ + 'transfer_info'] = DialogNodeOutputConnectToAgentTransferInfo.from_dict( + transfer_info) + if (topic := _dict.get('topic')) is not None: + args['topic'] = topic + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeConnectToAgent object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'input') and self.input is not None: - _dict['input'] = self.input._to_dict() + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 
'message_to_human_agent' + ) and self.message_to_human_agent is not None: + _dict['message_to_human_agent'] = self.message_to_human_agent + if hasattr(self, + 'agent_available') and self.agent_available is not None: + if isinstance(self.agent_available, dict): + _dict['agent_available'] = self.agent_available + else: + _dict['agent_available'] = self.agent_available.to_dict() + if hasattr(self, + 'agent_unavailable') and self.agent_unavailable is not None: + if isinstance(self.agent_unavailable, dict): + _dict['agent_unavailable'] = self.agent_unavailable + else: + _dict['agent_unavailable'] = self.agent_unavailable.to_dict() + if hasattr(self, 'transfer_info') and self.transfer_info is not None: + if isinstance(self.transfer_info, dict): + _dict['transfer_info'] = self.transfer_info + else: + _dict['transfer_info'] = self.transfer_info.to_dict() + if hasattr(self, 'topic') and self.topic is not None: + _dict['topic'] = self.topic + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this DialogNodeOutputOptionsElementValue object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeConnectToAgent object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeConnectToAgent' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogNodesVisited(object): +class RuntimeResponseGenericRuntimeResponseTypeConversationalSearch( + RuntimeResponseGeneric): """ - DialogNodesVisited. + RuntimeResponseGenericRuntimeResponseTypeConversationalSearch. - :attr str dialog_node: (optional) A dialog node that was triggered during processing - of the input message. - :attr str title: (optional) The title of the dialog node. - :attr str conditions: (optional) The conditions that trigger the dialog node. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str text: The text of the conversational search response. + :param str citations_title: The title of the citations. The default is “How do + we know?”. It can be updated in the conversational search user interface. + :param List[ResponseGenericCitation] citations: The citations for the generated + response. + :param ResponseGenericConfidenceScores confidence_scores: The confidence scores + for determining whether to show the generated response or an “I don't know” + response. + :param str response_length_option: The response length option. It is used to + control the length of the generated response. It is configured either in the + user interface or through the Update skill API. 
For more information, see + [watsonx Assistant documentation]( + https://cloud.ibm.com/docs/watson-assistant?topic=watson-assistant-conversational-search#tuning-the-generated-response-length-in-conversational-search). + :param List[SearchResults] search_results: An array of objects containing the + search results. + :param str disclaimer: A disclaimer for the conversational search response. """ - def __init__(self, dialog_node=None, title=None, conditions=None): + def __init__( + self, + response_type: str, + text: str, + citations_title: str, + citations: List['ResponseGenericCitation'], + confidence_scores: 'ResponseGenericConfidenceScores', + response_length_option: str, + search_results: List['SearchResults'], + disclaimer: str, + ) -> None: """ - Initialize a DialogNodesVisited object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeConversationalSearch object. - :param str dialog_node: (optional) A dialog node that was triggered during - processing of the input message. - :param str title: (optional) The title of the dialog node. - :param str conditions: (optional) The conditions that trigger the dialog node. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str text: The text of the conversational search response. + :param str citations_title: The title of the citations. The default is “How + do we know?”. It can be updated in the conversational search user + interface. + :param List[ResponseGenericCitation] citations: The citations for the + generated response. + :param ResponseGenericConfidenceScores confidence_scores: The confidence + scores for determining whether to show the generated response or an “I + don't know” response. + :param str response_length_option: The response length option. It is used + to control the length of the generated response. It is configured either in + the user interface or through the Update skill API. For more information, + see [watsonx Assistant documentation]( + https://cloud.ibm.com/docs/watson-assistant?topic=watson-assistant-conversational-search#tuning-the-generated-response-length-in-conversational-search). + :param List[SearchResults] search_results: An array of objects containing + the search results. + :param str disclaimer: A disclaimer for the conversational search response. 
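# A hedged construction sketch for the conversational-search response: the generated
# to_dict()/__str__ helpers accept plain dicts for nested models, so an item can be
# assembled without building every nested class. All values below, including
# response_length_option, are placeholders; nested field names are not taken from this
# diff, and the import path is assumed to be ibm_watson.assistant_v2.
from ibm_watson.assistant_v2 import (
    RuntimeResponseGenericRuntimeResponseTypeConversationalSearch,)

item = RuntimeResponseGenericRuntimeResponseTypeConversationalSearch(
    response_type='conversational_search',
    text='Generated answer text.',
    citations_title='How do we know?',
    citations=[],           # list of ResponseGenericCitation models or plain dicts
    confidence_scores={},   # ResponseGenericConfidenceScores model or plain dict
    response_length_option='moderate',
    search_results=[],      # list of SearchResults models or plain dicts
    disclaimer='This answer was generated by AI.',
)
print(item)  # __str__ pretty-prints to_dict() as indented JSON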
""" - self.dialog_node = dialog_node - self.title = title - self.conditions = conditions + # pylint: disable=super-init-not-called + self.response_type = response_type + self.text = text + self.citations_title = citations_title + self.citations = citations + self.confidence_scores = confidence_scores + self.response_length_option = response_length_option + self.search_results = search_results + self.disclaimer = disclaimer @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogNodesVisited object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'RuntimeResponseGenericRuntimeResponseTypeConversationalSearch': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeConversationalSearch object from a json dictionary.""" args = {} - if 'dialog_node' in _dict: - args['dialog_node'] = _dict.get('dialog_node') - if 'title' in _dict: - args['title'] = _dict.get('title') - if 'conditions' in _dict: - args['conditions'] = _dict.get('conditions') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeConversationalSearch JSON' + ) + if (text := _dict.get('text')) is not None: + args['text'] = text + else: + raise ValueError( + 'Required property \'text\' not present in RuntimeResponseGenericRuntimeResponseTypeConversationalSearch JSON' + ) + if (citations_title := _dict.get('citations_title')) is not None: + args['citations_title'] = citations_title + else: + raise ValueError( + 'Required property \'citations_title\' not present in RuntimeResponseGenericRuntimeResponseTypeConversationalSearch JSON' + ) + if (citations := _dict.get('citations')) is not None: + args['citations'] = [ + ResponseGenericCitation.from_dict(v) for v in citations + ] + else: + raise ValueError( + 'Required property \'citations\' not present in RuntimeResponseGenericRuntimeResponseTypeConversationalSearch JSON' + ) + if (confidence_scores := _dict.get('confidence_scores')) is not None: + args[ + 'confidence_scores'] = ResponseGenericConfidenceScores.from_dict( + confidence_scores) + else: + raise ValueError( + 'Required property \'confidence_scores\' not present in RuntimeResponseGenericRuntimeResponseTypeConversationalSearch JSON' + ) + if (response_length_option := + _dict.get('response_length_option')) is not None: + args['response_length_option'] = response_length_option + else: + raise ValueError( + 'Required property \'response_length_option\' not present in RuntimeResponseGenericRuntimeResponseTypeConversationalSearch JSON' + ) + if (search_results := _dict.get('search_results')) is not None: + args['search_results'] = [ + SearchResults.from_dict(v) for v in search_results + ] + else: + raise ValueError( + 'Required property \'search_results\' not present in RuntimeResponseGenericRuntimeResponseTypeConversationalSearch JSON' + ) + if (disclaimer := _dict.get('disclaimer')) is not None: + args['disclaimer'] = disclaimer + else: + raise ValueError( + 'Required property \'disclaimer\' not present in RuntimeResponseGenericRuntimeResponseTypeConversationalSearch JSON' + ) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeConversationalSearch object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 
'dialog_node') and self.dialog_node is not None: - _dict['dialog_node'] = self.dialog_node - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, 'conditions') and self.conditions is not None: - _dict['conditions'] = self.conditions + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, + 'citations_title') and self.citations_title is not None: + _dict['citations_title'] = self.citations_title + if hasattr(self, 'citations') and self.citations is not None: + citations_list = [] + for v in self.citations: + if isinstance(v, dict): + citations_list.append(v) + else: + citations_list.append(v.to_dict()) + _dict['citations'] = citations_list + if hasattr(self, + 'confidence_scores') and self.confidence_scores is not None: + if isinstance(self.confidence_scores, dict): + _dict['confidence_scores'] = self.confidence_scores + else: + _dict['confidence_scores'] = self.confidence_scores.to_dict() + if hasattr(self, 'response_length_option' + ) and self.response_length_option is not None: + _dict['response_length_option'] = self.response_length_option + if hasattr(self, 'search_results') and self.search_results is not None: + search_results_list = [] + for v in self.search_results: + if isinstance(v, dict): + search_results_list.append(v) + else: + search_results_list.append(v.to_dict()) + _dict['search_results'] = search_results_list + if hasattr(self, 'disclaimer') and self.disclaimer is not None: + _dict['disclaimer'] = self.disclaimer return _dict - def __str__(self): - """Return a `str` version of this DialogNodesVisited object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeConversationalSearch object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeConversationalSearch' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeConversationalSearch' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogRuntimeResponseGeneric(object): +class RuntimeResponseGenericRuntimeResponseTypeDate(RuntimeResponseGeneric): """ - DialogRuntimeResponseGeneric. + RuntimeResponseGenericRuntimeResponseTypeDate. - :attr str response_type: The type of response returned by the dialog node. The - specified response type must be supported by the client application or channel. - **Note:** The **suggestion** response type is part of the disambiguation feature, - which is only available for Premium users. - :attr str text: (optional) The text of the response. - :attr int time: (optional) How long to pause, in milliseconds. - :attr bool typing: (optional) Whether to send a "user is typing" event during the - pause. - :attr str source: (optional) The URL of the image. - :attr str title: (optional) The title or introductory text to show before the - response. 
- :attr str description: (optional) The description to show with the the response. - :attr str preference: (optional) The preferred type of control to display. - :attr list[DialogNodeOutputOptionsElement] options: (optional) An array of objects - describing the options from which the user can choose. - :attr str message_to_human_agent: (optional) A message to be sent to the human agent - who will be taking over the conversation. - :attr str topic: (optional) A label identifying the topic of the conversation, derived - from the **user_label** property of the relevant node. - :attr list[DialogSuggestion] suggestions: (optional) An array of objects describing - the possible matching dialog nodes from which the user can choose. - **Note:** The **suggestions** property is part of the disambiguation feature, which is - only available for Premium users. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. """ - def __init__(self, - response_type, - text=None, - time=None, - typing=None, - source=None, - title=None, - description=None, - preference=None, - options=None, - message_to_human_agent=None, - topic=None, - suggestions=None): + def __init__( + self, + response_type: str, + ) -> None: """ - Initialize a DialogRuntimeResponseGeneric object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeDate object. - :param str response_type: The type of response returned by the dialog node. The - specified response type must be supported by the client application or channel. - **Note:** The **suggestion** response type is part of the disambiguation feature, - which is only available for Premium users. - :param str text: (optional) The text of the response. - :param int time: (optional) How long to pause, in milliseconds. - :param bool typing: (optional) Whether to send a "user is typing" event during the - pause. - :param str source: (optional) The URL of the image. - :param str title: (optional) The title or introductory text to show before the - response. - :param str description: (optional) The description to show with the the response. - :param str preference: (optional) The preferred type of control to display. - :param list[DialogNodeOutputOptionsElement] options: (optional) An array of - objects describing the options from which the user can choose. - :param str message_to_human_agent: (optional) A message to be sent to the human - agent who will be taking over the conversation. - :param str topic: (optional) A label identifying the topic of the conversation, - derived from the **user_label** property of the relevant node. - :param list[DialogSuggestion] suggestions: (optional) An array of objects - describing the possible matching dialog nodes from which the user can choose. - **Note:** The **suggestions** property is part of the disambiguation feature, - which is only available for Premium users. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. 
""" + # pylint: disable=super-init-not-called self.response_type = response_type - self.text = text - self.time = time - self.typing = typing - self.source = source - self.title = title - self.description = description - self.preference = preference - self.options = options - self.message_to_human_agent = message_to_human_agent - self.topic = topic - self.suggestions = suggestions @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogRuntimeResponseGeneric object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeDate': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeDate object from a json dictionary.""" args = {} - if 'response_type' in _dict: - args['response_type'] = _dict.get('response_type') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'response_type\' not present in DialogRuntimeResponseGeneric JSON' + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeDate JSON' ) - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'time' in _dict: - args['time'] = _dict.get('time') - if 'typing' in _dict: - args['typing'] = _dict.get('typing') - if 'source' in _dict: - args['source'] = _dict.get('source') - if 'title' in _dict: - args['title'] = _dict.get('title') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'preference' in _dict: - args['preference'] = _dict.get('preference') - if 'options' in _dict: - args['options'] = [ - DialogNodeOutputOptionsElement._from_dict(x) - for x in (_dict.get('options')) - ] - if 'message_to_human_agent' in _dict: - args['message_to_human_agent'] = _dict.get('message_to_human_agent') - if 'topic' in _dict: - args['topic'] = _dict.get('topic') - if 'suggestions' in _dict: - args['suggestions'] = [ - DialogSuggestion._from_dict(x) - for x in (_dict.get('suggestions')) - ] return cls(**args) - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'response_type') and self.response_type is not None: - _dict['response_type'] = self.response_type - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'time') and self.time is not None: - _dict['time'] = self.time - if hasattr(self, 'typing') and self.typing is not None: - _dict['typing'] = self.typing - if hasattr(self, 'source') and self.source is not None: - _dict['source'] = self.source - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'preference') and self.preference is not None: - _dict['preference'] = self.preference - if hasattr(self, 'options') and self.options is not None: - _dict['options'] = [x._to_dict() for x in self.options] - if hasattr(self, 'message_to_human_agent' - ) and self.message_to_human_agent is not None: - _dict['message_to_human_agent'] = self.message_to_human_agent - if hasattr(self, 'topic') and self.topic is not None: - _dict['topic'] = self.topic - if hasattr(self, 'suggestions') and self.suggestions is not None: - _dict['suggestions'] = [x._to_dict() for x in self.suggestions] + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeDate object from a json dictionary.""" + return cls.from_dict(_dict) + + def 
to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type return _dict - def __str__(self): - """Return a `str` version of this DialogRuntimeResponseGeneric object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeDate object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeDate') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeDate') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DialogSuggestion(object): +class RuntimeResponseGenericRuntimeResponseTypeDtmf(RuntimeResponseGeneric): """ - DialogSuggestion. + RuntimeResponseGenericRuntimeResponseTypeDtmf. - :attr str label: The user-facing label for the disambiguation option. This label is - taken from the **user_label** property of the corresponding dialog node. - :attr DialogSuggestionValue value: An object defining the message input to be sent to - the assistant if the user selects the corresponding disambiguation option. - :attr dict output: (optional) The dialog output that will be returned from the Watson - Assistant service if the user selects the corresponding option. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param DtmfCommandInfo command_info: (optional) + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, label, value, output=None): + def __init__( + self, + response_type: str, + *, + command_info: Optional['DtmfCommandInfo'] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: """ - Initialize a DialogSuggestion object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeDtmf object. - :param str label: The user-facing label for the disambiguation option. This label - is taken from the **user_label** property of the corresponding dialog node. - :param DialogSuggestionValue value: An object defining the message input to be - sent to the assistant if the user selects the corresponding disambiguation option. - :param dict output: (optional) The dialog output that will be returned from the - Watson Assistant service if the user selects the corresponding option. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param DtmfCommandInfo command_info: (optional) + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. 
If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. """ - self.label = label - self.value = value - self.output = output + # pylint: disable=super-init-not-called + self.response_type = response_type + self.command_info = command_info + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogSuggestion object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeDtmf': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeDtmf object from a json dictionary.""" args = {} - if 'label' in _dict: - args['label'] = _dict.get('label') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type else: raise ValueError( - 'Required property \'label\' not present in DialogSuggestion JSON' - ) - if 'value' in _dict: - args['value'] = DialogSuggestionValue._from_dict(_dict.get('value')) - else: - raise ValueError( - 'Required property \'value\' not present in DialogSuggestion JSON' + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeDtmf JSON' ) - if 'output' in _dict: - args['output'] = _dict.get('output') + if (command_info := _dict.get('command_info')) is not None: + args['command_info'] = DtmfCommandInfo.from_dict(command_info) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeDtmf object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'label') and self.label is not None: - _dict['label'] = self.label - if hasattr(self, 'value') and self.value is not None: - _dict['value'] = self.value._to_dict() - if hasattr(self, 'output') and self.output is not None: - _dict['output'] = self.output + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'command_info') and self.command_info is not None: + if isinstance(self.command_info, dict): + _dict['command_info'] = self.command_info + else: + _dict['command_info'] = self.command_info.to_dict() + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this DialogSuggestion object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeDtmf object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeDtmf') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeDtmf') -> bool: """Return `true` when self and other are not 
equal, false otherwise.""" return not self == other -class DialogSuggestionValue(object): +class RuntimeResponseGenericRuntimeResponseTypeEndSession( + RuntimeResponseGeneric): """ - An object defining the message input to be sent to the assistant if the user selects - the corresponding disambiguation option. + RuntimeResponseGenericRuntimeResponseTypeEndSession. - :attr MessageInput input: (optional) An input object that includes the input text. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param dict channel_options: (optional) For internal use only. """ - def __init__(self, input=None): + def __init__( + self, + response_type: str, + *, + channel_options: Optional[dict] = None, + ) -> None: """ - Initialize a DialogSuggestionValue object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeEndSession object. - :param MessageInput input: (optional) An input object that includes the input - text. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param dict channel_options: (optional) For internal use only. """ - self.input = input + # pylint: disable=super-init-not-called + self.response_type = response_type + self.channel_options = channel_options @classmethod - def _from_dict(cls, _dict): - """Initialize a DialogSuggestionValue object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'RuntimeResponseGenericRuntimeResponseTypeEndSession': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeEndSession object from a json dictionary.""" args = {} - if 'input' in _dict: - args['input'] = MessageInput._from_dict(_dict.get('input')) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeEndSession JSON' + ) + if (channel_options := _dict.get('channel_options')) is not None: + args['channel_options'] = channel_options return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeEndSession object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'input') and self.input is not None: - _dict['input'] = self.input._to_dict() + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, + 'channel_options') and self.channel_options is not None: + _dict['channel_options'] = self.channel_options return _dict - def __str__(self): - """Return a `str` version of this DialogSuggestionValue object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeEndSession object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeEndSession' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == 
other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeEndSession' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class MessageContext(object): +class RuntimeResponseGenericRuntimeResponseTypeIframe(RuntimeResponseGeneric): """ - MessageContext. + RuntimeResponseGenericRuntimeResponseTypeIframe. - :attr MessageContextGlobal global_: (optional) Information that is shared by all - skills used by the Assistant. - :attr MessageContextSkills skills: (optional) Information specific to particular - skills used by the Assistant. - **Note:** Currently, only a single property named `main skill` is supported. This - object contains variables that apply to the dialog skill used by the assistant. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the embeddable content. + :param str title: (optional) The title or introductory text to show before the + response. + :param str description: (optional) The description to show with the + response. + :param str image_url: (optional) The URL of an image that shows a preview of the + embedded content. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, global_=None, skills=None): + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + image_url: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: """ - Initialize a MessageContext object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeIframe object. - :param MessageContextGlobal global_: (optional) Information that is shared by all - skills used by the Assistant. - :param MessageContextSkills skills: (optional) Information specific to particular - skills used by the Assistant. - **Note:** Currently, only a single property named `main skill` is supported. This - object contains variables that apply to the dialog skill used by the assistant. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the embeddable content. + :param str title: (optional) The title or introductory text to show before + the response. + :param str description: (optional) The description to show with the + response. + :param str image_url: (optional) The URL of an image that shows a preview + of the embedded content. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client.
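# A minimal sketch of the validation behaviour added by the new from_dict
# classmethods in this change: a required property that is absent raises
# ValueError, optional ones are simply left out of the constructor call.
# Assumes these models are importable from ibm_watson.assistant_v2, the
# module this diff appears to modify.
from ibm_watson.assistant_v2 import \
    RuntimeResponseGenericRuntimeResponseTypeEndSession

parsed = RuntimeResponseGenericRuntimeResponseTypeEndSession.from_dict(
    {'response_type': 'end_session'})
print(parsed.response_type)  # -> end_session

try:
    RuntimeResponseGenericRuntimeResponseTypeEndSession.from_dict({})
except ValueError as err:
    print(err)  # Required property 'response_type' not present ...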
""" - self.global_ = global_ - self.skills = skills + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.image_url = image_url + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageContext object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeIframe': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeIframe object from a json dictionary.""" args = {} - if 'global' in _dict: - args['global_'] = MessageContextGlobal._from_dict( - _dict.get('global')) - if 'skills' in _dict: - args['skills'] = MessageContextSkills._from_dict( - _dict.get('skills')) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeIframe JSON' + ) + if (source := _dict.get('source')) is not None: + args['source'] = source + else: + raise ValueError( + 'Required property \'source\' not present in RuntimeResponseGenericRuntimeResponseTypeIframe JSON' + ) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (image_url := _dict.get('image_url')) is not None: + args['image_url'] = image_url + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeIframe object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'global_') and self.global_ is not None: - _dict['global'] = self.global_._to_dict() - if hasattr(self, 'skills') and self.skills is not None: - _dict['skills'] = self.skills._to_dict() + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'image_url') and self.image_url is not None: + _dict['image_url'] = self.image_url + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this MessageContext object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeIframe object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeIframe') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return 
self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeIframe') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class MessageContextGlobal(object): +class RuntimeResponseGenericRuntimeResponseTypeImage(RuntimeResponseGeneric): """ - Information that is shared by all skills used by the Assistant. + RuntimeResponseGenericRuntimeResponseTypeImage. - :attr MessageContextGlobalSystem system: (optional) Built-in system properties that - apply to all skills used by the assistant. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the image. + :param str title: (optional) The title to show before the response. + :param str description: (optional) The description to show with the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. + :param str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the image cannot be seen. """ - def __init__(self, system=None): + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + alt_text: Optional[str] = None, + ) -> None: """ - Initialize a MessageContextGlobal object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeImage object. - :param MessageContextGlobalSystem system: (optional) Built-in system properties - that apply to all skills used by the assistant. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the image. + :param str title: (optional) The title to show before the response. + :param str description: (optional) The description to show with the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the image cannot be seen.
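# A hedged sketch of how a client might branch on the concrete response types
# introduced above. The discriminator strings ('image', 'iframe',
# 'end_session') are assumed to match the lower-cased type names; only
# attributes shown in these models are accessed.
def summarize_generic(item):
    """Return a short display string for one runtime response generic item."""
    response_type = getattr(item, 'response_type', None)
    if response_type == 'image':
        # alt_text, title and description are optional; source is required.
        return item.alt_text or item.title or item.source
    if response_type == 'iframe':
        return 'Embedded content: {0}'.format(item.source)
    if response_type == 'end_session':
        return 'Session ended.'
    return 'Unhandled response type: {0}'.format(response_type)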
""" - self.system = system + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.channels = channels + self.alt_text = alt_text @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageContextGlobal object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeImage': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeImage object from a json dictionary.""" args = {} - if 'system' in _dict: - args['system'] = MessageContextGlobalSystem._from_dict( - _dict.get('system')) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeImage JSON' + ) + if (source := _dict.get('source')) is not None: + args['source'] = source + else: + raise ValueError( + 'Required property \'source\' not present in RuntimeResponseGenericRuntimeResponseTypeImage JSON' + ) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] + if (alt_text := _dict.get('alt_text')) is not None: + args['alt_text'] = alt_text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeImage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'system') and self.system is not None: - _dict['system'] = self.system._to_dict() + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict - def __str__(self): - """Return a `str` version of this MessageContextGlobal object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeImage object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeImage') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeImage') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not 
self == other -class MessageContextGlobalSystem(object): +class RuntimeResponseGenericRuntimeResponseTypeOption(RuntimeResponseGeneric): """ - Built-in system properties that apply to all skills used by the assistant. + RuntimeResponseGenericRuntimeResponseTypeOption. - :attr str timezone: (optional) The user time zone. The assistant uses the time zone to - correctly resolve relative time references. - :attr str user_id: (optional) A string value that identifies the user who is - interacting with the assistant. The client must provide a unique identifier for each - individual end user who accesses the application. For Plus and Premium plans, this - user ID is used to identify unique users for billing purposes. This string cannot - contain carriage return, newline, or tab characters. - :attr int turn_count: (optional) A counter that is automatically incremented with each - turn of the conversation. A value of 1 indicates that this is the the first turn of a - new conversation, which can affect the behavior of some skills (for example, - triggering the start node of a dialog). + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str title: The title or introductory text to show before the response. + :param str description: (optional) The description to show with the + response. + :param str preference: (optional) The preferred type of control to display. + :param List[DialogNodeOutputOptionsElement] options: An array of objects + describing the options from which the user can choose. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, timezone=None, user_id=None, turn_count=None): + def __init__( + self, + response_type: str, + title: str, + options: List['DialogNodeOutputOptionsElement'], + *, + description: Optional[str] = None, + preference: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: """ - Initialize a MessageContextGlobalSystem object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeOption object. - :param str timezone: (optional) The user time zone. The assistant uses the time - zone to correctly resolve relative time references. - :param str user_id: (optional) A string value that identifies the user who is - interacting with the assistant. The client must provide a unique identifier for - each individual end user who accesses the application. For Plus and Premium plans, - this user ID is used to identify unique users for billing purposes. This string - cannot contain carriage return, newline, or tab characters. - :param int turn_count: (optional) A counter that is automatically incremented with - each turn of the conversation. A value of 1 indicates that this is the the first - turn of a new conversation, which can affect the behavior of some skills (for - example, triggering the start node of a dialog). + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str title: The title or introductory text to show before the + response.
+ :param List[DialogNodeOutputOptionsElement] options: An array of objects + describing the options from which the user can choose. + :param str description: (optional) The description to show with the + response. + :param str preference: (optional) The preferred type of control to display. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. """ - self.timezone = timezone - self.user_id = user_id - self.turn_count = turn_count + # pylint: disable=super-init-not-called + self.response_type = response_type + self.title = title + self.description = description + self.preference = preference + self.options = options + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageContextGlobalSystem object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeOption': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeOption object from a json dictionary.""" args = {} - if 'timezone' in _dict: - args['timezone'] = _dict.get('timezone') - if 'user_id' in _dict: - args['user_id'] = _dict.get('user_id') - if 'turn_count' in _dict: - args['turn_count'] = _dict.get('turn_count') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeOption JSON' + ) + if (title := _dict.get('title')) is not None: + args['title'] = title + else: + raise ValueError( + 'Required property \'title\' not present in RuntimeResponseGenericRuntimeResponseTypeOption JSON' + ) + if (description := _dict.get('description')) is not None: + args['description'] = description + if (preference := _dict.get('preference')) is not None: + args['preference'] = preference + if (options := _dict.get('options')) is not None: + args['options'] = [ + DialogNodeOutputOptionsElement.from_dict(v) for v in options + ] + else: + raise ValueError( + 'Required property \'options\' not present in RuntimeResponseGenericRuntimeResponseTypeOption JSON' + ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeOption object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'timezone') and self.timezone is not None: - _dict['timezone'] = self.timezone - if hasattr(self, 'user_id') and self.user_id is not None: - _dict['user_id'] = self.user_id - if hasattr(self, 'turn_count') and self.turn_count is not None: - _dict['turn_count'] = self.turn_count + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'preference') and self.preference is not None: + _dict['preference'] = self.preference + if hasattr(self, 'options') and self.options is not None: + options_list
= [] + for v in self.options: + if isinstance(v, dict): + options_list.append(v) + else: + options_list.append(v.to_dict()) + _dict['options'] = options_list + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this MessageContextGlobalSystem object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeOption object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeOption') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeOption') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class PreferenceEnum(str, Enum): + """ + The preferred type of control to display. + """ + + DROPDOWN = 'dropdown' + BUTTON = 'button' + -class MessageContextSkills(object): +class RuntimeResponseGenericRuntimeResponseTypePause(RuntimeResponseGeneric): """ - Information specific to particular skills used by the Assistant. - **Note:** Currently, only a single property named `main skill` is supported. This - object contains variables that apply to the dialog skill used by the assistant. + RuntimeResponseGenericRuntimeResponseTypePause. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param int time: How long to pause, in milliseconds. + :param bool typing: (optional) Whether to send a "user is typing" event during + the pause. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, **kwargs): + def __init__( + self, + response_type: str, + time: int, + *, + typing: Optional[bool] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: """ - Initialize a MessageContextSkills object. + Initialize a RuntimeResponseGenericRuntimeResponseTypePause object. - :param **kwargs: (optional) Any additional properties. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param int time: How long to pause, in milliseconds. + :param bool typing: (optional) Whether to send a "user is typing" event + during the pause. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. 
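# The docstrings above all repeat one contract: when 'channels' is present,
# the response targets a built-in integration and an API client should skip
# it. A small sketch of that filtering, assuming 'generic' is the list of
# runtime response models taken from a message response's output:
def client_visible(generic):
    """Drop generic responses addressed to built-in integrations."""
    return [item for item in generic if not getattr(item, 'channels', None)]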
""" - for _key, _value in kwargs.items(): - setattr(self, _key, _value) + # pylint: disable=super-init-not-called + self.response_type = response_type + self.time = time + self.typing = typing + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageContextSkills object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypePause': + """Initialize a RuntimeResponseGenericRuntimeResponseTypePause object from a json dictionary.""" args = {} - xtra = _dict.copy() - args.update(xtra) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypePause JSON' + ) + if (time := _dict.get('time')) is not None: + args['time'] = time + else: + raise ValueError( + 'Required property \'time\' not present in RuntimeResponseGenericRuntimeResponseTypePause JSON' + ) + if (typing := _dict.get('typing')) is not None: + args['typing'] = typing + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypePause object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'time') and self.time is not None: + _dict['time'] = self.time + if hasattr(self, 'typing') and self.typing is not None: + _dict['typing'] = self.typing + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __setattr__(self, name, value): - properties = {} - if not hasattr(self, '_additionalProperties'): - super(MessageContextSkills, self).__setattr__( - '_additionalProperties', set()) - if name not in properties: - self._additionalProperties.add(name) - super(MessageContextSkills, self).__setattr__(name, value) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __str__(self): - """Return a `str` version of this MessageContextSkills object.""" - return json.dumps(self._to_dict(), indent=2) + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypePause object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypePause') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypePause') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class MessageInput(object): +class 
RuntimeResponseGenericRuntimeResponseTypeSearch(RuntimeResponseGeneric): + """ + RuntimeResponseGenericRuntimeResponseTypeSearch. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str header: The title or introductory text to show before the response. + This text is defined in the search skill configuration. + :param List[SearchResult] primary_results: An array of objects that contains the + search results to be displayed in the initial response to the user. + :param List[SearchResult] additional_results: An array of objects that contains + additional search results that can be displayed to the user upon request. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - An input object that includes the input text. - :attr str message_type: (optional) The type of user input. Currently, only text input - is supported. - :attr str text: (optional) The text of the user input. This string cannot contain - carriage return, newline, or tab characters, and it must be no longer than 2048 - characters. - :attr MessageInputOptions options: (optional) Optional properties that control how the - assistant responds. - :attr list[RuntimeIntent] intents: (optional) Intents to use when evaluating the user - input. Include intents from the previous response to continue using those intents - rather than trying to recognize intents in the new input. - :attr list[RuntimeEntity] entities: (optional) Entities to use when evaluating the - message. Include entities from the previous response to continue using those entities - rather than detecting entities in the new input. - :attr str suggestion_id: (optional) For internal use only. - """ - - def __init__(self, - message_type=None, - text=None, - options=None, - intents=None, - entities=None, - suggestion_id=None): + def __init__( + self, + response_type: str, + header: str, + primary_results: List['SearchResult'], + additional_results: List['SearchResult'], + *, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: """ - Initialize a MessageInput object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeSearch object. - :param str message_type: (optional) The type of user input. Currently, only text - input is supported. - :param str text: (optional) The text of the user input. This string cannot contain - carriage return, newline, or tab characters, and it must be no longer than 2048 - characters. - :param MessageInputOptions options: (optional) Optional properties that control - how the assistant responds. - :param list[RuntimeIntent] intents: (optional) Intents to use when evaluating the - user input. Include intents from the previous response to continue using those - intents rather than trying to recognize intents in the new input. - :param list[RuntimeEntity] entities: (optional) Entities to use when evaluating - the message. Include entities from the previous response to continue using those - entities rather than detecting entities in the new input. - :param str suggestion_id: (optional) For internal use only. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. 
+ :param str header: The title or introductory text to show before the + response. This text is defined in the search skill configuration. + :param List[SearchResult] primary_results: An array of objects that + contains the search results to be displayed in the initial response to the + user. + :param List[SearchResult] additional_results: An array of objects that + contains additional search results that can be displayed to the user upon + request. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. """ - self.message_type = message_type - self.text = text - self.options = options - self.intents = intents - self.entities = entities - self.suggestion_id = suggestion_id + # pylint: disable=super-init-not-called + self.response_type = response_type + self.header = header + self.primary_results = primary_results + self.additional_results = additional_results + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageInput object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeSearch': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeSearch object from a json dictionary.""" args = {} - if 'message_type' in _dict: - args['message_type'] = _dict.get('message_type') - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'options' in _dict: - args['options'] = MessageInputOptions._from_dict( - _dict.get('options')) - if 'intents' in _dict: - args['intents'] = [ - RuntimeIntent._from_dict(x) for x in (_dict.get('intents')) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeSearch JSON' + ) + if (header := _dict.get('header')) is not None: + args['header'] = header + else: + raise ValueError( + 'Required property \'header\' not present in RuntimeResponseGenericRuntimeResponseTypeSearch JSON' + ) + if (primary_results := _dict.get('primary_results')) is not None: + args['primary_results'] = [ + SearchResult.from_dict(v) for v in primary_results + ] + else: + raise ValueError( + 'Required property \'primary_results\' not present in RuntimeResponseGenericRuntimeResponseTypeSearch JSON' + ) + if (additional_results := _dict.get('additional_results')) is not None: + args['additional_results'] = [ + SearchResult.from_dict(v) for v in additional_results ] - if 'entities' in _dict: - args['entities'] = [ - RuntimeEntity._from_dict(x) for x in (_dict.get('entities')) + else: + raise ValueError( + 'Required property \'additional_results\' not present in RuntimeResponseGenericRuntimeResponseTypeSearch JSON' + ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels ] - if 'suggestion_id' in _dict: - args['suggestion_id'] = _dict.get('suggestion_id') return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeSearch object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'message_type') and self.message_type is not None: - 
_dict['message_type'] = self.message_type - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'options') and self.options is not None: - _dict['options'] = self.options._to_dict() - if hasattr(self, 'intents') and self.intents is not None: - _dict['intents'] = [x._to_dict() for x in self.intents] - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] - if hasattr(self, 'suggestion_id') and self.suggestion_id is not None: - _dict['suggestion_id'] = self.suggestion_id + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'header') and self.header is not None: + _dict['header'] = self.header + if hasattr(self, + 'primary_results') and self.primary_results is not None: + primary_results_list = [] + for v in self.primary_results: + if isinstance(v, dict): + primary_results_list.append(v) + else: + primary_results_list.append(v.to_dict()) + _dict['primary_results'] = primary_results_list + if hasattr( + self, + 'additional_results') and self.additional_results is not None: + additional_results_list = [] + for v in self.additional_results: + if isinstance(v, dict): + additional_results_list.append(v) + else: + additional_results_list.append(v.to_dict()) + _dict['additional_results'] = additional_results_list + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this MessageInput object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeSearch object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeSearch') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, + other: 'RuntimeResponseGenericRuntimeResponseTypeSearch') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class MessageInputOptions(object): +class RuntimeResponseGenericRuntimeResponseTypeSuggestion( + RuntimeResponseGeneric): """ - Optional properties that control how the assistant responds. + RuntimeResponseGenericRuntimeResponseTypeSuggestion. - :attr bool debug: (optional) Whether to return additional diagnostic information. Set - to `true` to return additional information under the `output.debug` key. - :attr bool restart: (optional) Whether to restart dialog processing at the root of the - dialog, regardless of any previously visited nodes. **Note:** This does not affect - `turn_count` or any other context variables. - :attr bool alternate_intents: (optional) Whether to return more than one intent. Set - to `true` to return all matching intents. - :attr bool return_context: (optional) Whether to return session context with the - response. If you specify `true`, the response will include the `context` property. 
+ :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str title: The title or introductory text to show before the response. + :param List[DialogSuggestion] suggestions: An array of objects describing the + possible matching dialog nodes from which the user can choose. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, - debug=None, - restart=None, - alternate_intents=None, - return_context=None): + def __init__( + self, + response_type: str, + title: str, + suggestions: List['DialogSuggestion'], + *, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: """ - Initialize a MessageInputOptions object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeSuggestion object. - :param bool debug: (optional) Whether to return additional diagnostic information. - Set to `true` to return additional information under the `output.debug` key. - :param bool restart: (optional) Whether to restart dialog processing at the root - of the dialog, regardless of any previously visited nodes. **Note:** This does not - affect `turn_count` or any other context variables. - :param bool alternate_intents: (optional) Whether to return more than one intent. - Set to `true` to return all matching intents. - :param bool return_context: (optional) Whether to return session context with the - response. If you specify `true`, the response will include the `context` property. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str title: The title or introductory text to show before the + response. + :param List[DialogSuggestion] suggestions: An array of objects describing + the possible matching dialog nodes from which the user can choose. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. 
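# A sketch of a detail visible in the to_dict methods above: list-valued
# attributes may hold either model instances or plain dicts, and the
# isinstance(v, dict) branches pass plain dicts through untouched. The
# 'suggestion' discriminator string, the dict keys below, and the import
# path are illustrative assumptions, not taken from this diff.
from ibm_watson.assistant_v2 import \
    RuntimeResponseGenericRuntimeResponseTypeSuggestion

suggestion = RuntimeResponseGenericRuntimeResponseTypeSuggestion(
    response_type='suggestion',
    title='Did you mean:',
    suggestions=[{'label': 'Open an account',
                  'value': {'input': {'text': 'Open an account'}}}],
    channels=[{'channel': 'chat'}],
)
print(suggestion.to_dict()['channels'])  # -> [{'channel': 'chat'}]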
""" - self.debug = debug - self.restart = restart - self.alternate_intents = alternate_intents - self.return_context = return_context + # pylint: disable=super-init-not-called + self.response_type = response_type + self.title = title + self.suggestions = suggestions + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageInputOptions object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'RuntimeResponseGenericRuntimeResponseTypeSuggestion': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeSuggestion object from a json dictionary.""" args = {} - if 'debug' in _dict: - args['debug'] = _dict.get('debug') - if 'restart' in _dict: - args['restart'] = _dict.get('restart') - if 'alternate_intents' in _dict: - args['alternate_intents'] = _dict.get('alternate_intents') - if 'return_context' in _dict: - args['return_context'] = _dict.get('return_context') + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeSuggestion JSON' + ) + if (title := _dict.get('title')) is not None: + args['title'] = title + else: + raise ValueError( + 'Required property \'title\' not present in RuntimeResponseGenericRuntimeResponseTypeSuggestion JSON' + ) + if (suggestions := _dict.get('suggestions')) is not None: + args['suggestions'] = [ + DialogSuggestion.from_dict(v) for v in suggestions + ] + else: + raise ValueError( + 'Required property \'suggestions\' not present in RuntimeResponseGenericRuntimeResponseTypeSuggestion JSON' + ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeSuggestion object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'debug') and self.debug is not None: - _dict['debug'] = self.debug - if hasattr(self, 'restart') and self.restart is not None: - _dict['restart'] = self.restart - if hasattr(self, - 'alternate_intents') and self.alternate_intents is not None: - _dict['alternate_intents'] = self.alternate_intents - if hasattr(self, 'return_context') and self.return_context is not None: - _dict['return_context'] = self.return_context + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'suggestions') and self.suggestions is not None: + suggestions_list = [] + for v in self.suggestions: + if isinstance(v, dict): + suggestions_list.append(v) + else: + suggestions_list.append(v.to_dict()) + _dict['suggestions'] = suggestions_list + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this MessageInputOptions object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + 
def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeSuggestion object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeSuggestion' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeSuggestion' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class MessageOutput(object): +class RuntimeResponseGenericRuntimeResponseTypeText(RuntimeResponseGeneric): + """ + RuntimeResponseGenericRuntimeResponseTypeText. + + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str text: The text of the response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - Assistant output to be rendered or processed by the client. - :attr list[DialogRuntimeResponseGeneric] generic: (optional) Output intended for any - channel. It is the responsibility of the client application to implement the supported - response types. - :attr list[RuntimeIntent] intents: (optional) An array of intents recognized in the - user input, sorted in descending order of confidence. - :attr list[RuntimeEntity] entities: (optional) An array of entities identified in the - user input. - :attr list[DialogNodeAction] actions: (optional) An array of objects describing any - actions requested by the dialog node. - :attr MessageOutputDebug debug: (optional) Additional detailed information about a - message response and how it was generated. - :attr dict user_defined: (optional) An object containing any custom properties - included in the response. This object includes any arbitrary properties defined in the - dialog JSON editor as part of the dialog node output. - """ - - def __init__(self, - generic=None, - intents=None, - entities=None, - actions=None, - debug=None, - user_defined=None): + def __init__( + self, + response_type: str, + text: str, + *, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: """ - Initialize a MessageOutput object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeText object. - :param list[DialogRuntimeResponseGeneric] generic: (optional) Output intended for - any channel. It is the responsibility of the client application to implement the - supported response types. - :param list[RuntimeIntent] intents: (optional) An array of intents recognized in - the user input, sorted in descending order of confidence. - :param list[RuntimeEntity] entities: (optional) An array of entities identified in - the user input. - :param list[DialogNodeAction] actions: (optional) An array of objects describing - any actions requested by the dialog node. - :param MessageOutputDebug debug: (optional) Additional detailed information about - a message response and how it was generated. - :param dict user_defined: (optional) An object containing any custom properties - included in the response. 
This object includes any arbitrary properties defined in - the dialog JSON editor as part of the dialog node output. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str text: The text of the response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. """ - self.generic = generic - self.intents = intents - self.entities = entities - self.actions = actions - self.debug = debug - self.user_defined = user_defined + # pylint: disable=super-init-not-called + self.response_type = response_type + self.text = text + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageOutput object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeText': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeText object from a json dictionary.""" args = {} - if 'generic' in _dict: - args['generic'] = [ - DialogRuntimeResponseGeneric._from_dict(x) - for x in (_dict.get('generic')) - ] - if 'intents' in _dict: - args['intents'] = [ - RuntimeIntent._from_dict(x) for x in (_dict.get('intents')) - ] - if 'entities' in _dict: - args['entities'] = [ - RuntimeEntity._from_dict(x) for x in (_dict.get('entities')) - ] - if 'actions' in _dict: - args['actions'] = [ - DialogNodeAction._from_dict(x) for x in (_dict.get('actions')) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeText JSON' + ) + if (text := _dict.get('text')) is not None: + args['text'] = text + else: + raise ValueError( + 'Required property \'text\' not present in RuntimeResponseGenericRuntimeResponseTypeText JSON' + ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels ] - if 'debug' in _dict: - args['debug'] = MessageOutputDebug._from_dict(_dict.get('debug')) - if 'user_defined' in _dict: - args['user_defined'] = _dict.get('user_defined') return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeText object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'generic') and self.generic is not None: - _dict['generic'] = [x._to_dict() for x in self.generic] - if hasattr(self, 'intents') and self.intents is not None: - _dict['intents'] = [x._to_dict() for x in self.intents] - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] - if hasattr(self, 'actions') and self.actions is not None: - _dict['actions'] = [x._to_dict() for x in self.actions] - if hasattr(self, 'debug') and self.debug is not None: - _dict['debug'] = self.debug._to_dict() - if hasattr(self, 'user_defined') and self.user_defined is not None: - _dict['user_defined'] = self.user_defined + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'text') and 
self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this MessageOutput object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeText object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeText') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeText') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class MessageOutputDebug(object): +class RuntimeResponseGenericRuntimeResponseTypeUserDefined( + RuntimeResponseGeneric): """ - Additional detailed information about a message response and how it was generated. + RuntimeResponseGenericRuntimeResponseTypeUserDefined. - :attr list[DialogNodesVisited] nodes_visited: (optional) An array of objects - containing detailed diagnostic information about the nodes that were triggered during - processing of the input message. - :attr list[DialogLogMessage] log_messages: (optional) An array of up to 50 messages - logged with the request. - :attr bool branch_exited: (optional) Assistant sets this to true when this message - response concludes or interrupts a dialog. - :attr str branch_exited_reason: (optional) When `branch_exited` is set to `true` by - the Assistant, the `branch_exited_reason` specifies whether the dialog completed by - itself or got interrupted. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param dict user_defined: An object containing any properties for the + user-defined response type. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. """ - def __init__(self, - nodes_visited=None, - log_messages=None, - branch_exited=None, - branch_exited_reason=None): + def __init__( + self, + response_type: str, + user_defined: dict, + *, + channels: Optional[List['ResponseGenericChannel']] = None, + ) -> None: """ - Initialize a MessageOutputDebug object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeUserDefined object. - :param list[DialogNodesVisited] nodes_visited: (optional) An array of objects - containing detailed diagnostic information about the nodes that were triggered - during processing of the input message. - :param list[DialogLogMessage] log_messages: (optional) An array of up to 50 - messages logged with the request. - :param bool branch_exited: (optional) Assistant sets this to true when this - message response concludes or interrupts a dialog. 
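The regenerated response-generic models above all follow the same pattern: a public `from_dict` that validates required properties and a `to_dict` that round-trips the JSON payload, with the old underscored helpers kept as thin wrappers. A minimal sketch, not part of the diff, assuming these classes are exposed from `ibm_watson.assistant_v2` as the surrounding hunks suggest:

```python
# Minimal usage sketch for the regenerated models (assumed import path:
# ibm_watson.assistant_v2). from_dict() validates required properties and
# to_dict() round-trips the JSON payload.
from ibm_watson.assistant_v2 import RuntimeResponseGenericRuntimeResponseTypeText

payload = {'response_type': 'text', 'text': 'Hello! How can I help you?'}

text_response = RuntimeResponseGenericRuntimeResponseTypeText.from_dict(payload)
assert text_response.to_dict() == payload

# The deprecated underscored helpers now delegate to the public methods, so
# older code calling _from_dict()/_to_dict() keeps working.
assert RuntimeResponseGenericRuntimeResponseTypeText._from_dict(payload).to_dict() == payload
```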
- :param str branch_exited_reason: (optional) When `branch_exited` is set to `true` - by the Assistant, the `branch_exited_reason` specifies whether the dialog - completed by itself or got interrupted. + :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param dict user_defined: An object containing any properties for the + user-defined response type. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. """ - self.nodes_visited = nodes_visited - self.log_messages = log_messages - self.branch_exited = branch_exited - self.branch_exited_reason = branch_exited_reason + # pylint: disable=super-init-not-called + self.response_type = response_type + self.user_defined = user_defined + self.channels = channels @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageOutputDebug object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'RuntimeResponseGenericRuntimeResponseTypeUserDefined': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeUserDefined object from a json dictionary.""" args = {} - if 'nodes_visited' in _dict: - args['nodes_visited'] = [ - DialogNodesVisited._from_dict(x) - for x in (_dict.get('nodes_visited')) - ] - if 'log_messages' in _dict: - args['log_messages'] = [ - DialogLogMessage._from_dict(x) - for x in (_dict.get('log_messages')) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeUserDefined JSON' + ) + if (user_defined := _dict.get('user_defined')) is not None: + args['user_defined'] = user_defined + else: + raise ValueError( + 'Required property \'user_defined\' not present in RuntimeResponseGenericRuntimeResponseTypeUserDefined JSON' + ) + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels ] - if 'branch_exited' in _dict: - args['branch_exited'] = _dict.get('branch_exited') - if 'branch_exited_reason' in _dict: - args['branch_exited_reason'] = _dict.get('branch_exited_reason') return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeUserDefined object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'nodes_visited') and self.nodes_visited is not None: - _dict['nodes_visited'] = [x._to_dict() for x in self.nodes_visited] - if hasattr(self, 'log_messages') and self.log_messages is not None: - _dict['log_messages'] = [x._to_dict() for x in self.log_messages] - if hasattr(self, 'branch_exited') and self.branch_exited is not None: - _dict['branch_exited'] = self.branch_exited - if hasattr(self, 'branch_exited_reason' - ) and self.branch_exited_reason is not None: - _dict['branch_exited_reason'] = self.branch_exited_reason + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'user_defined') and self.user_defined is not None: + _dict['user_defined'] = self.user_defined + if 
hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list return _dict - def __str__(self): - """Return a `str` version of this MessageOutputDebug object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeUserDefined object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeUserDefined' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'RuntimeResponseGenericRuntimeResponseTypeUserDefined' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class MessageResponse(object): +class RuntimeResponseGenericRuntimeResponseTypeVideo(RuntimeResponseGeneric): """ - A response from the Watson Assistant service. + RuntimeResponseGenericRuntimeResponseTypeVideo. - :attr MessageOutput output: Assistant output to be rendered or processed by the - client. - :attr MessageContext context: (optional) State information for the conversation. The - context is stored by the assistant on a per-session basis. You can use this property - to access context variables. - **Note:** The context is included in message responses only if - **return_context**=`true` in the message request. + :param str response_type: The type of response returned by the dialog node. The + specified response type must be supported by the client application or channel. + :param str source: The `https:` URL of the video. + :param str title: (optional) The title or introductory text to show before the + response. + :param str description: (optional) The description to show with the the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of objects + specifying channels for which the response is intended. If **channels** is + present, the response is intended for a built-in integration and should not be + handled by an API client. + :param dict channel_options: (optional) For internal use only. + :param str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the video cannot be seen. """ - def __init__(self, output, context=None): + def __init__( + self, + response_type: str, + source: str, + *, + title: Optional[str] = None, + description: Optional[str] = None, + channels: Optional[List['ResponseGenericChannel']] = None, + channel_options: Optional[dict] = None, + alt_text: Optional[str] = None, + ) -> None: """ - Initialize a MessageResponse object. + Initialize a RuntimeResponseGenericRuntimeResponseTypeVideo object. - :param MessageOutput output: Assistant output to be rendered or processed by the - client. - :param MessageContext context: (optional) State information for the conversation. - The context is stored by the assistant on a per-session basis. You can use this - property to access context variables. - **Note:** The context is included in message responses only if - **return_context**=`true` in the message request. 
+ :param str response_type: The type of response returned by the dialog node. + The specified response type must be supported by the client application or + channel. + :param str source: The `https:` URL of the video. + :param str title: (optional) The title or introductory text to show before + the response. + :param str description: (optional) The description to show with the the + response. + :param List[ResponseGenericChannel] channels: (optional) An array of + objects specifying channels for which the response is intended. If + **channels** is present, the response is intended for a built-in + integration and should not be handled by an API client. + :param dict channel_options: (optional) For internal use only. + :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the video cannot be seen. """ - self.output = output - self.context = context + # pylint: disable=super-init-not-called + self.response_type = response_type + self.source = source + self.title = title + self.description = description + self.channels = channels + self.channel_options = channel_options + self.alt_text = alt_text @classmethod - def _from_dict(cls, _dict): - """Initialize a MessageResponse object from a json dictionary.""" + def from_dict( + cls, + _dict: Dict) -> 'RuntimeResponseGenericRuntimeResponseTypeVideo': + """Initialize a RuntimeResponseGenericRuntimeResponseTypeVideo object from a json dictionary.""" args = {} - if 'output' in _dict: - args['output'] = MessageOutput._from_dict(_dict.get('output')) + if (response_type := _dict.get('response_type')) is not None: + args['response_type'] = response_type + else: + raise ValueError( + 'Required property \'response_type\' not present in RuntimeResponseGenericRuntimeResponseTypeVideo JSON' + ) + if (source := _dict.get('source')) is not None: + args['source'] = source else: raise ValueError( - 'Required property \'output\' not present in MessageResponse JSON' + 'Required property \'source\' not present in RuntimeResponseGenericRuntimeResponseTypeVideo JSON' ) - if 'context' in _dict: - args['context'] = MessageContext._from_dict(_dict.get('context')) + if (title := _dict.get('title')) is not None: + args['title'] = title + if (description := _dict.get('description')) is not None: + args['description'] = description + if (channels := _dict.get('channels')) is not None: + args['channels'] = [ + ResponseGenericChannel.from_dict(v) for v in channels + ] + if (channel_options := _dict.get('channel_options')) is not None: + args['channel_options'] = channel_options + if (alt_text := _dict.get('alt_text')) is not None: + args['alt_text'] = alt_text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RuntimeResponseGenericRuntimeResponseTypeVideo object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'output') and self.output is not None: - _dict['output'] = self.output._to_dict() - if hasattr(self, 'context') and self.context is not None: - _dict['context'] = self.context._to_dict() + if hasattr(self, 'response_type') and self.response_type is not None: + _dict['response_type'] = self.response_type + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'description') and self.description is not 
None: + _dict['description'] = self.description + if hasattr(self, 'channels') and self.channels is not None: + channels_list = [] + for v in self.channels: + if isinstance(v, dict): + channels_list.append(v) + else: + channels_list.append(v.to_dict()) + _dict['channels'] = channels_list + if hasattr(self, + 'channel_options') and self.channel_options is not None: + _dict['channel_options'] = self.channel_options + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict - def __str__(self): - """Return a `str` version of this MessageResponse object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this RuntimeResponseGenericRuntimeResponseTypeVideo object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeVideo') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, + other: 'RuntimeResponseGenericRuntimeResponseTypeVideo') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class RuntimeEntity(object): +class StatelessMessageStreamResponseMessageStreamCompleteItem( + StatelessMessageStreamResponse): """ - A term from the request that was identified as an entity. + A completed response item. A complete item is a composition of every streamed partial + item with the same streaming_metadata.id, and each complete item contains its own + unique streaming_metadata.id. - :attr str entity: An entity detected in the input. - :attr list[int] location: An array of zero-based character offsets that indicate where - the detected entity values begin and end in the input text. - :attr str value: The term in the input text that was recognized as an entity value. - :attr float confidence: (optional) A decimal percentage that represents Watson's - confidence in the entity. - :attr dict metadata: (optional) Any metadata for the entity. - :attr list[CaptureGroup] groups: (optional) The recognized capture groups for the - entity, as defined by the entity pattern. + :param CompleteItem complete_item: (optional) """ - def __init__(self, - entity, - location, - value, - confidence=None, - metadata=None, - groups=None): + def __init__( + self, + *, + complete_item: Optional['CompleteItem'] = None, + ) -> None: """ - Initialize a RuntimeEntity object. + Initialize a StatelessMessageStreamResponseMessageStreamCompleteItem object. - :param str entity: An entity detected in the input. - :param list[int] location: An array of zero-based character offsets that indicate - where the detected entity values begin and end in the input text. - :param str value: The term in the input text that was recognized as an entity - value. - :param float confidence: (optional) A decimal percentage that represents Watson's - confidence in the entity. - :param dict metadata: (optional) Any metadata for the entity. - :param list[CaptureGroup] groups: (optional) The recognized capture groups for the - entity, as defined by the entity pattern. 
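Each regenerated `from_dict` now rejects payloads that are missing required properties instead of building a partially populated model. A short sketch, not part of the diff, using the video response type added above (import path assumed to be `ibm_watson.assistant_v2`):

```python
# Sketch of the required-property validation in the regenerated from_dict()
# helpers (assumed import path: ibm_watson.assistant_v2).
from ibm_watson.assistant_v2 import RuntimeResponseGenericRuntimeResponseTypeVideo

video = RuntimeResponseGenericRuntimeResponseTypeVideo.from_dict({
    'response_type': 'video',
    'source': 'https://example.com/intro.mp4',
    'alt_text': 'Product introduction video',
})
print(video)  # __str__ pretty-prints the to_dict() JSON

try:
    RuntimeResponseGenericRuntimeResponseTypeVideo.from_dict({'response_type': 'video'})
except ValueError as err:
    print(err)  # Required property 'source' not present in ... JSON
```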
+ :param CompleteItem complete_item: (optional) """ - self.entity = entity - self.location = location - self.value = value - self.confidence = confidence - self.metadata = metadata - self.groups = groups + # pylint: disable=super-init-not-called + self.complete_item = complete_item @classmethod - def _from_dict(cls, _dict): - """Initialize a RuntimeEntity object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'StatelessMessageStreamResponseMessageStreamCompleteItem': + """Initialize a StatelessMessageStreamResponseMessageStreamCompleteItem object from a json dictionary.""" args = {} - if 'entity' in _dict: - args['entity'] = _dict.get('entity') - else: - raise ValueError( - 'Required property \'entity\' not present in RuntimeEntity JSON' - ) - if 'location' in _dict: - args['location'] = _dict.get('location') - else: - raise ValueError( - 'Required property \'location\' not present in RuntimeEntity JSON' - ) - if 'value' in _dict: - args['value'] = _dict.get('value') - else: - raise ValueError( - 'Required property \'value\' not present in RuntimeEntity JSON') - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') - if 'metadata' in _dict: - args['metadata'] = _dict.get('metadata') - if 'groups' in _dict: - args['groups'] = [ - CaptureGroup._from_dict(x) for x in (_dict.get('groups')) - ] + if (complete_item := _dict.get('complete_item')) is not None: + args['complete_item'] = CompleteItem.from_dict(complete_item) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessMessageStreamResponseMessageStreamCompleteItem object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'entity') and self.entity is not None: - _dict['entity'] = self.entity - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location - if hasattr(self, 'value') and self.value is not None: - _dict['value'] = self.value - if hasattr(self, 'confidence') and self.confidence is not None: - _dict['confidence'] = self.confidence - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata - if hasattr(self, 'groups') and self.groups is not None: - _dict['groups'] = [x._to_dict() for x in self.groups] + if hasattr(self, 'complete_item') and self.complete_item is not None: + if isinstance(self.complete_item, dict): + _dict['complete_item'] = self.complete_item + else: + _dict['complete_item'] = self.complete_item.to_dict() return _dict - def __str__(self): - """Return a `str` version of this RuntimeEntity object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatelessMessageStreamResponseMessageStreamCompleteItem object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, other: 'StatelessMessageStreamResponseMessageStreamCompleteItem' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'StatelessMessageStreamResponseMessageStreamCompleteItem' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == 
other -class RuntimeIntent(object): +class StatelessMessageStreamResponseMessageStreamPartialItem( + StatelessMessageStreamResponse): """ - An intent identified in the user input. + A chunk of the streamed message response. - :attr str intent: The name of the recognized intent. - :attr float confidence: A decimal percentage that represents Watson's confidence in - the intent. + :param PartialItem partial_item: (optional) Message response partial item + content. """ - def __init__(self, intent, confidence): + def __init__( + self, + *, + partial_item: Optional['PartialItem'] = None, + ) -> None: """ - Initialize a RuntimeIntent object. + Initialize a StatelessMessageStreamResponseMessageStreamPartialItem object. - :param str intent: The name of the recognized intent. - :param float confidence: A decimal percentage that represents Watson's confidence - in the intent. + :param PartialItem partial_item: (optional) Message response partial item + content. """ - self.intent = intent - self.confidence = confidence + # pylint: disable=super-init-not-called + self.partial_item = partial_item @classmethod - def _from_dict(cls, _dict): - """Initialize a RuntimeIntent object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'StatelessMessageStreamResponseMessageStreamPartialItem': + """Initialize a StatelessMessageStreamResponseMessageStreamPartialItem object from a json dictionary.""" args = {} - if 'intent' in _dict: - args['intent'] = _dict.get('intent') - else: - raise ValueError( - 'Required property \'intent\' not present in RuntimeIntent JSON' - ) - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') - else: - raise ValueError( - 'Required property \'confidence\' not present in RuntimeIntent JSON' - ) + if (partial_item := _dict.get('partial_item')) is not None: + args['partial_item'] = PartialItem.from_dict(partial_item) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessMessageStreamResponseMessageStreamPartialItem object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'intent') and self.intent is not None: - _dict['intent'] = self.intent - if hasattr(self, 'confidence') and self.confidence is not None: - _dict['confidence'] = self.confidence + if hasattr(self, 'partial_item') and self.partial_item is not None: + if isinstance(self.partial_item, dict): + _dict['partial_item'] = self.partial_item + else: + _dict['partial_item'] = self.partial_item.to_dict() return _dict - def __str__(self): - """Return a `str` version of this RuntimeIntent object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this StatelessMessageStreamResponseMessageStreamPartialItem object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__( + self, other: 'StatelessMessageStreamResponseMessageStreamPartialItem' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, other: 'StatelessMessageStreamResponseMessageStreamPartialItem' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == 
other -class SessionResponse(object): +class StatelessMessageStreamResponseStatelessMessageStreamFinalResponse( + StatelessMessageStreamResponse): """ - SessionResponse. + The final and stateless message response. - :attr str session_id: The session ID. + :param StatelessFinalResponse final_response: (optional) Message final response + content. """ - def __init__(self, session_id): + def __init__( + self, + *, + final_response: Optional['StatelessFinalResponse'] = None, + ) -> None: """ - Initialize a SessionResponse object. + Initialize a StatelessMessageStreamResponseStatelessMessageStreamFinalResponse object. - :param str session_id: The session ID. + :param StatelessFinalResponse final_response: (optional) Message final + response content. """ - self.session_id = session_id + # pylint: disable=super-init-not-called + self.final_response = final_response @classmethod - def _from_dict(cls, _dict): - """Initialize a SessionResponse object from a json dictionary.""" + def from_dict( + cls, _dict: Dict + ) -> 'StatelessMessageStreamResponseStatelessMessageStreamFinalResponse': + """Initialize a StatelessMessageStreamResponseStatelessMessageStreamFinalResponse object from a json dictionary.""" args = {} - if 'session_id' in _dict: - args['session_id'] = _dict.get('session_id') - else: - raise ValueError( - 'Required property \'session_id\' not present in SessionResponse JSON' - ) + if (final_response := _dict.get('final_response')) is not None: + args['final_response'] = StatelessFinalResponse.from_dict( + final_response) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatelessMessageStreamResponseStatelessMessageStreamFinalResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'session_id') and self.session_id is not None: - _dict['session_id'] = self.session_id + if hasattr(self, 'final_response') and self.final_response is not None: + if isinstance(self.final_response, dict): + _dict['final_response'] = self.final_response + else: + _dict['final_response'] = self.final_response.to_dict() return _dict - def __str__(self): - """Return a `str` version of this SessionResponse object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatelessMessageStreamResponseStatelessMessageStreamFinalResponse object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__( + self, + other: 'StatelessMessageStreamResponseStatelessMessageStreamFinalResponse' + ) -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__( + self, + other: 'StatelessMessageStreamResponseStatelessMessageStreamFinalResponse' + ) -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other diff --git a/ibm_watson/authorization_v1.py b/ibm_watson/authorization_v1.py deleted file mode 100644 index ea119dbf8..000000000 --- a/ibm_watson/authorization_v1.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding: utf-8 -# Copyright 2016 IBM All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -The v1 Authorization "service" that enables developers to -retrieve a temporary access token -""" - -from ibm_cloud_sdk_core import BaseService - -try: - import urllib.parse as urlparse # Python 3 -except ImportError: - import urlparse # Python 2 - - -class AuthorizationV1(BaseService): - """ - Generates tokens, which can be used client-side to avoid exposing the - service credentials. - Tokens are valid for 1 hour and are sent using the - `X-Watson-Authorization-Token` header. - """ - default_url = "https://stream.watsonplatform.net/authorization/api" - - def __init__(self, - url=default_url, - username=None, - password=None, - use_vcap_services=True): - BaseService.__init__( - self, - 'authorization', - url, - username, - password, - use_vcap_services, - display_name='authorization') - - def get_token(self, url): - """ - Retrieves a temporary access token - """ - # A hack to avoid url-encoding the url, since the authorization service - # doesn't work with correctly encoded urls - - parsed_url = urlparse.urlsplit(url) - parsed_url = parsed_url._replace(path='/authorization/api') - self.url = urlparse.urlunsplit(parsed_url) - - response = self.request(method='GET', url='/v1/token?url=' + url) - return response.result.text diff --git a/ibm_watson/common.py b/ibm_watson/common.py index 5929c6b57..a1595d614 100644 --- a/ibm_watson/common.py +++ b/ibm_watson/common.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2019 IBM All Rights Reserved. +# Copyright 2019, 2024 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,27 +15,57 @@ # limitations under the License. 
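With `authorization_v1.py` deleted above, the legacy token helper is gone entirely. A hedged migration sketch, not taken from this diff, showing the authenticator-based pattern the current SDK uses instead (the version date, API key, and service URL are placeholders):

```python
# Hedged migration sketch: the deleted AuthorizationV1 token helper is replaced
# by authenticators from ibm_cloud_sdk_core. All literal values are placeholders.
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import AssistantV2

authenticator = IAMAuthenticator('YOUR_APIKEY')
assistant = AssistantV2(version='2023-06-15', authenticator=authenticator)
assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com')
```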
import platform +import json from .version import __version__ +from typing import Iterator SDK_ANALYTICS_HEADER = 'X-IBMCloud-SDK-Analytics' USER_AGENT_HEADER = 'User-Agent' SDK_NAME = 'watson-apis-python-sdk' + def get_system_info(): - return '{0} {1} {2}'.format(platform.system(), # OS - platform.release(), # OS version - platform.python_version()) # Python version + return '{0} {1} {2}'.format( + platform.system(), # OS + platform.release(), # OS version + platform.python_version()) # Python version + + def get_user_agent(): return user_agent + def get_sdk_analytics(service_name, service_version, operation_id): return 'service_name={0};service_version={1};operation_id={2}'.format( service_name, service_version, operation_id) + user_agent = '{0}-{1} {2}'.format(SDK_NAME, __version__, get_system_info()) + def get_sdk_headers(service_name, service_version, operation_id): headers = {} - headers[SDK_ANALYTICS_HEADER] = get_sdk_analytics(service_name, service_version, operation_id) + headers[SDK_ANALYTICS_HEADER] = get_sdk_analytics(service_name, + service_version, + operation_id) headers[USER_AGENT_HEADER] = get_user_agent() return headers + + +def parse_sse_stream_data(response) -> Iterator[dict]: + event_message = None # Can be used in the future to return the event message to the user + data_json = None + + for chunk in response.iter_lines(): + decoded_chunk = chunk.decode("utf-8") + + if decoded_chunk.find("event", 0, len("event")) == 0: + event_message = decoded_chunk[len("event") + 2:] + elif decoded_chunk.find("data", 0, len("data")) == 0: + data_json_str = decoded_chunk[len("data") + 2:] + data_json = json.loads(data_json_str) + + if event_message and data_json is not None: + yield data_json + event_message = None + data_json = None diff --git a/ibm_watson/compare_comply_v1.py b/ibm_watson/compare_comply_v1.py deleted file mode 100644 index 2685042dd..000000000 --- a/ibm_watson/compare_comply_v1.py +++ /dev/null @@ -1,5228 +0,0 @@ -# coding: utf-8 - -# Copyright 2018 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -IBM Watson™ Compare and Comply analyzes governing documents to provide details about -critical aspects of the documents. -""" - -from __future__ import absolute_import - -import json -from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService -from ibm_cloud_sdk_core import datetime_to_string, string_to_datetime -from os.path import basename - -############################################################################## -# Service -############################################################################## - - -class CompareComplyV1(BaseService): - """The Compare Comply V1 service.""" - - default_url = 'https://gateway.watsonplatform.net/compare-comply/api' - - def __init__( - self, - version, - url=default_url, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): - """ - Construct a new client for the Compare Comply service. 
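The new `parse_sse_stream_data` helper added to `common.py` walks a server-sent-events body line by line and yields the parsed JSON of each `data:` line once its matching `event:` line has been seen. A minimal sketch, not part of the diff; `FakeStreamResponse` is a hypothetical stand-in for a `requests` response opened with `stream=True`:

```python
# Sketch of parse_sse_stream_data consuming an SSE body. FakeStreamResponse is
# a hypothetical stand-in for a streaming requests.Response.
from ibm_watson.common import parse_sse_stream_data


class FakeStreamResponse:

    def iter_lines(self):
        yield b'event: message'
        yield b'data: {"partial_item": {"response_type": "text", "text": "Hel"}}'
        yield b'event: message'
        yield b'data: {"partial_item": {"response_type": "text", "text": "lo"}}'


for data in parse_sse_stream_data(FakeStreamResponse()):
    print(data['partial_item']['text'])
# Hel
# lo
```

Each yielded dict can then be handed to the matching `StatelessMessageStreamResponse*` subclass via `from_dict`, e.g. `StatelessMessageStreamResponseMessageStreamPartialItem.from_dict(data)` for chunks carrying a `partial_item`.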
- - :param str version: The API version date to use with the service, in - "YYYY-MM-DD" format. Whenever the API is changed in a backwards - incompatible way, a new minor version of the API is released. - The service uses the API version for the date you specify, or - the most recent version before that date. Note that you should - not programmatically specify the current date at runtime, in - case the API has been updated since your application's release. - Instead, specify a version date that is compatible with your - application, and don't change it until your application is - ready for a later version. - - :param str url: The base url to use when contacting the service (e.g. - "https://gateway.watsonplatform.net/compare-comply/api/compare-comply/api"). - The base url may differ between IBM Cloud regions. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. - - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. Defaults to - 'https://iam.cloud.ibm.com/identity/token'. - """ - - BaseService.__init__( - self, - vcap_services_name='compare-comply', - url=url, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Compare Comply') - self.version = version - - ######################### - # HTML conversion - ######################### - - def convert_to_html(self, - file, - filename=None, - file_content_type=None, - model=None, - **kwargs): - """ - Convert document to HTML. - - Converts a document to HTML. - - :param file file: The document to convert. - :param str filename: The filename for file. - :param str file_content_type: The content type of file. - :param str model: The analysis model to be used by the service. For the **Element - classification** and **Compare two documents** methods, the default is - `contracts`. For the **Extract tables** method, the default is `tables`. These - defaults apply to the standalone methods as well as to the methods' use in - batch-processing requests. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if file is None: - raise ValueError('file must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', 'convert_to_html') - headers.update(sdk_headers) - - params = {'version': self.version, 'model': model} - - form_data = {} - if not filename and hasattr(file, 'name'): - filename = basename(file.name) - if not filename: - raise ValueError('filename must be provided') - form_data['file'] = (filename, file, file_content_type or - 'application/octet-stream') - - url = '/v1/html_conversion' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - ######################### - # Element classification - ######################### - - def classify_elements(self, - file, - file_content_type=None, - model=None, - **kwargs): - """ - Classify the elements of a document. 
- - Analyzes the structural and semantic elements of a document. - - :param file file: The document to classify. - :param str file_content_type: The content type of file. - :param str model: The analysis model to be used by the service. For the **Element - classification** and **Compare two documents** methods, the default is - `contracts`. For the **Extract tables** method, the default is `tables`. These - defaults apply to the standalone methods as well as to the methods' use in - batch-processing requests. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if file is None: - raise ValueError('file must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', - 'classify_elements') - headers.update(sdk_headers) - - params = {'version': self.version, 'model': model} - - form_data = {} - form_data['file'] = (None, file, file_content_type or - 'application/octet-stream') - - url = '/v1/element_classification' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - ######################### - # Tables - ######################### - - def extract_tables(self, file, file_content_type=None, model=None, - **kwargs): - """ - Extract a document's tables. - - Analyzes the tables in a document. - - :param file file: The document on which to run table extraction. - :param str file_content_type: The content type of file. - :param str model: The analysis model to be used by the service. For the **Element - classification** and **Compare two documents** methods, the default is - `contracts`. For the **Extract tables** method, the default is `tables`. These - defaults apply to the standalone methods as well as to the methods' use in - batch-processing requests. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if file is None: - raise ValueError('file must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', 'extract_tables') - headers.update(sdk_headers) - - params = {'version': self.version, 'model': model} - - form_data = {} - form_data['file'] = (None, file, file_content_type or - 'application/octet-stream') - - url = '/v1/tables' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - ######################### - # Comparison - ######################### - - def compare_documents(self, - file_1, - file_2, - file_1_content_type=None, - file_2_content_type=None, - file_1_label=None, - file_2_label=None, - model=None, - **kwargs): - """ - Compare two documents. - - Compares two input documents. Documents must be in the same format. - - :param file file_1: The first document to compare. - :param file file_2: The second document to compare. - :param str file_1_content_type: The content type of file_1. - :param str file_2_content_type: The content type of file_2. - :param str file_1_label: A text label for the first document. - :param str file_2_label: A text label for the second document. - :param str model: The analysis model to be used by the service. 
For the **Element - classification** and **Compare two documents** methods, the default is - `contracts`. For the **Extract tables** method, the default is `tables`. These - defaults apply to the standalone methods as well as to the methods' use in - batch-processing requests. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if file_1 is None: - raise ValueError('file_1 must be provided') - if file_2 is None: - raise ValueError('file_2 must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', - 'compare_documents') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'file_1_label': file_1_label, - 'file_2_label': file_2_label, - 'model': model - } - - form_data = {} - form_data['file_1'] = (None, file_1, file_1_content_type or - 'application/octet-stream') - form_data['file_2'] = (None, file_2, file_2_content_type or - 'application/octet-stream') - - url = '/v1/comparison' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - ######################### - # Feedback - ######################### - - def add_feedback(self, feedback_data, user_id=None, comment=None, **kwargs): - """ - Add feedback. - - Adds feedback in the form of _labels_ from a subject-matter expert (SME) to a - governing document. - **Important:** Feedback is not immediately incorporated into the training model, - nor is it guaranteed to be incorporated at a later date. Instead, submitted - feedback is used to suggest future updates to the training model. - - :param FeedbackDataInput feedback_data: Feedback data for submission. - :param str user_id: An optional string identifying the user. - :param str comment: An optional comment on or description of the feedback. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if feedback_data is None: - raise ValueError('feedback_data must be provided') - feedback_data = self._convert_model(feedback_data, FeedbackDataInput) - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', 'add_feedback') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'feedback_data': feedback_data, - 'user_id': user_id, - 'comment': comment - } - - url = '/v1/feedback' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def delete_feedback(self, feedback_id, model=None, **kwargs): - """ - Delete a specified feedback entry. - - Deletes a feedback entry with a specified `feedback_id`. - - :param str feedback_id: A string that specifies the feedback entry to be deleted - from the document. - :param str model: The analysis model to be used by the service. For the **Element - classification** and **Compare two documents** methods, the default is - `contracts`. For the **Extract tables** method, the default is `tables`. These - defaults apply to the standalone methods as well as to the methods' use in - batch-processing requests. 
- :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if feedback_id is None: - raise ValueError('feedback_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', 'delete_feedback') - headers.update(sdk_headers) - - params = {'version': self.version, 'model': model} - - url = '/v1/feedback/{0}'.format(*self._encode_path_vars(feedback_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_feedback(self, feedback_id, model=None, **kwargs): - """ - List a specified feedback entry. - - Lists a feedback entry with a specified `feedback_id`. - - :param str feedback_id: A string that specifies the feedback entry to be included - in the output. - :param str model: The analysis model to be used by the service. For the **Element - classification** and **Compare two documents** methods, the default is - `contracts`. For the **Extract tables** method, the default is `tables`. These - defaults apply to the standalone methods as well as to the methods' use in - batch-processing requests. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if feedback_id is None: - raise ValueError('feedback_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', 'get_feedback') - headers.update(sdk_headers) - - params = {'version': self.version, 'model': model} - - url = '/v1/feedback/{0}'.format(*self._encode_path_vars(feedback_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_feedback(self, - feedback_type=None, - before=None, - after=None, - document_title=None, - model_id=None, - model_version=None, - category_removed=None, - category_added=None, - category_not_changed=None, - type_removed=None, - type_added=None, - type_not_changed=None, - page_limit=None, - cursor=None, - sort=None, - include_total=None, - **kwargs): - """ - List the feedback in a document. - - Lists the feedback in a document. - - :param str feedback_type: An optional string that filters the output to include - only feedback with the specified feedback type. The only permitted value is - `element_classification`. - :param date before: An optional string in the format `YYYY-MM-DD` that filters the - output to include only feedback that was added before the specified date. - :param date after: An optional string in the format `YYYY-MM-DD` that filters the - output to include only feedback that was added after the specified date. - :param str document_title: An optional string that filters the output to include - only feedback from the document with the specified `document_title`. - :param str model_id: An optional string that filters the output to include only - feedback with the specified `model_id`. The only permitted value is `contracts`. - :param str model_version: An optional string that filters the output to include - only feedback with the specified `model_version`. - :param str category_removed: An optional string in the form of a comma-separated - list of categories. 
If this is specified, the service filters the output to - include only feedback that has at least one category from the list removed. - :param str category_added: An optional string in the form of a comma-separated - list of categories. If this is specified, the service filters the output to - include only feedback that has at least one category from the list added. - :param str category_not_changed: An optional string in the form of a - comma-separated list of categories. If this is specified, the service filters the - output to include only feedback that has at least one category from the list - unchanged. - :param str type_removed: An optional string of comma-separated `nature`:`party` - pairs. If this is specified, the service filters the output to include only - feedback that has at least one `nature`:`party` pair from the list removed. - :param str type_added: An optional string of comma-separated `nature`:`party` - pairs. If this is specified, the service filters the output to include only - feedback that has at least one `nature`:`party` pair from the list removed. - :param str type_not_changed: An optional string of comma-separated - `nature`:`party` pairs. If this is specified, the service filters the output to - include only feedback that has at least one `nature`:`party` pair from the list - unchanged. - :param int page_limit: An optional integer specifying the number of documents that - you want the service to return. - :param str cursor: An optional string that returns the set of documents after the - previous set. Use this parameter with the `page_limit` parameter. - :param str sort: An optional comma-separated list of fields in the document to - sort on. You can optionally specify the sort direction by prefixing the value of - the field with `-` for descending order or `+` for ascending order (the default). - Currently permitted sorting fields are `created`, `user_id`, and `document_title`. - :param bool include_total: An optional boolean value. If specified as `true`, the - `pagination` object in the output includes a value called `total` that gives the - total count of feedback created. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', 'list_feedback') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'feedback_type': feedback_type, - 'before': before, - 'after': after, - 'document_title': document_title, - 'model_id': model_id, - 'model_version': model_version, - 'category_removed': category_removed, - 'category_added': category_added, - 'category_not_changed': category_not_changed, - 'type_removed': type_removed, - 'type_added': type_added, - 'type_not_changed': type_not_changed, - 'page_limit': page_limit, - 'cursor': cursor, - 'sort': sort, - 'include_total': include_total - } - - url = '/v1/feedback' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - ######################### - # Batches - ######################### - - def create_batch(self, - function, - input_credentials_file, - input_bucket_location, - input_bucket_name, - output_credentials_file, - output_bucket_location, - output_bucket_name, - model=None, - **kwargs): - """ - Submit a batch-processing request. 
- - Run Compare and Comply methods over a collection of input documents. - **Important:** Batch processing requires the use of the [IBM Cloud Object Storage - service](https://cloud.ibm.com/docs/services/cloud-object-storage/about-cos.html#about-ibm-cloud-object-storage). - The use of IBM Cloud Object Storage with Compare and Comply is discussed at [Using - batch - processing](https://cloud.ibm.com/docs/services/compare-comply/batching.html#before-you-batch). - - :param str function: The Compare and Comply method to run across the submitted - input documents. - :param file input_credentials_file: A JSON file containing the input Cloud Object - Storage credentials. At a minimum, the credentials must enable `READ` permissions - on the bucket defined by the `input_bucket_name` parameter. - :param str input_bucket_location: The geographical location of the Cloud Object - Storage input bucket as listed on the **Endpoint** tab of your Cloud Object - Storage instance; for example, `us-geo`, `eu-geo`, or `ap-geo`. - :param str input_bucket_name: The name of the Cloud Object Storage input bucket. - :param file output_credentials_file: A JSON file that lists the Cloud Object - Storage output credentials. At a minimum, the credentials must enable `READ` and - `WRITE` permissions on the bucket defined by the `output_bucket_name` parameter. - :param str output_bucket_location: The geographical location of the Cloud Object - Storage output bucket as listed on the **Endpoint** tab of your Cloud Object - Storage instance; for example, `us-geo`, `eu-geo`, or `ap-geo`. - :param str output_bucket_name: The name of the Cloud Object Storage output bucket. - :param str model: The analysis model to be used by the service. For the **Element - classification** and **Compare two documents** methods, the default is - `contracts`. For the **Extract tables** method, the default is `tables`. These - defaults apply to the standalone methods as well as to the methods' use in - batch-processing requests. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if function is None: - raise ValueError('function must be provided') - if input_credentials_file is None: - raise ValueError('input_credentials_file must be provided') - if input_bucket_location is None: - raise ValueError('input_bucket_location must be provided') - if input_bucket_name is None: - raise ValueError('input_bucket_name must be provided') - if output_credentials_file is None: - raise ValueError('output_credentials_file must be provided') - if output_bucket_location is None: - raise ValueError('output_bucket_location must be provided') - if output_bucket_name is None: - raise ValueError('output_bucket_name must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', 'create_batch') - headers.update(sdk_headers) - - params = {'version': self.version, 'function': function, 'model': model} - - form_data = {} - form_data['input_credentials_file'] = (None, input_credentials_file, - 'application/json') - form_data['input_bucket_location'] = (None, input_bucket_location, - 'text/plain') - form_data['input_bucket_name'] = (None, input_bucket_name, 'text/plain') - form_data['output_credentials_file'] = (None, output_credentials_file, - 'application/json') - form_data['output_bucket_location'] = (None, output_bucket_location, - 'text/plain') - form_data['output_bucket_name'] = (None, output_bucket_name, - 'text/plain') - - url = '/v1/batches' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - def get_batch(self, batch_id, **kwargs): - """ - Get information about a specific batch-processing job. - - Gets information about a batch-processing job with a specified ID. - - :param str batch_id: The ID of the batch-processing job whose information you want - to retrieve. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if batch_id is None: - raise ValueError('batch_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', 'get_batch') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/batches/{0}'.format(*self._encode_path_vars(batch_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_batches(self, **kwargs): - """ - List submitted batch-processing jobs. - - Lists batch-processing jobs submitted by users. - - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', 'list_batches') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/batches' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def update_batch(self, batch_id, action, model=None, **kwargs): - """ - Update a pending or active batch-processing job. - - Updates a pending or active batch-processing job. You can rescan the input bucket - to check for new documents or cancel a job. 
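# A minimal workflow sketch for the batch methods above, assuming an already-configured
# `compare_comply` client, hypothetical bucket names, and local JSON files holding the
# Cloud Object Storage credentials.
with open('input_credentials.json', 'rb') as input_creds, \
        open('output_credentials.json', 'rb') as output_creds:
    batch = compare_comply.create_batch(
        function='element_classification',
        input_credentials_file=input_creds,
        input_bucket_location='us-geo',
        input_bucket_name='my-input-bucket',
        output_credentials_file=output_creds,
        output_bucket_location='us-geo',
        output_bucket_name='my-output-bucket').get_result()

# Poll the job, then either rescan the input bucket for new documents or cancel it.
status = compare_comply.get_batch(batch['batch_id']).get_result()
print(status['status'], status.get('document_counts'))
compare_comply.update_batch(batch['batch_id'], action='rescan')
# compare_comply.update_batch(batch['batch_id'], action='cancel')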
- - :param str batch_id: The ID of the batch-processing job you want to update. - :param str action: The action you want to perform on the specified - batch-processing job. - :param str model: The analysis model to be used by the service. For the **Element - classification** and **Compare two documents** methods, the default is - `contracts`. For the **Extract tables** method, the default is `tables`. These - defaults apply to the standalone methods as well as to the methods' use in - batch-processing requests. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if batch_id is None: - raise ValueError('batch_id must be provided') - if action is None: - raise ValueError('action must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('compare-comply', 'V1', 'update_batch') - headers.update(sdk_headers) - - params = {'version': self.version, 'action': action, 'model': model} - - url = '/v1/batches/{0}'.format(*self._encode_path_vars(batch_id)) - response = self.request( - method='PUT', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - -############################################################################## -# Models -############################################################################## - - -class Address(object): - """ - A party's address. - - :attr str text: (optional) A string listing the address. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - """ - - def __init__(self, text=None, location=None): - """ - Initialize a Address object. - - :param str text: (optional) A string listing the address. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - """ - self.text = text - self.location = location - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Address object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this Address object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class AlignedElement(object): - """ - AlignedElement. - - :attr list[ElementPair] element_pair: (optional) Identifies two elements that - semantically align between the compared documents. - :attr bool identical_text: (optional) Specifies whether the aligned element is - identical. 
Elements are considered identical despite minor differences such as leading - punctuation, end-of-sentence punctuation, whitespace, the presence or absence of - definite or indefinite articles, and others. - :attr list[str] provenance_ids: (optional) One or more hashed values that you can send - to IBM to provide feedback or receive support. - :attr bool significant_elements: (optional) Indicates that the elements aligned are - contractual clauses of significance. - """ - - def __init__(self, - element_pair=None, - identical_text=None, - provenance_ids=None, - significant_elements=None): - """ - Initialize a AlignedElement object. - - :param list[ElementPair] element_pair: (optional) Identifies two elements that - semantically align between the compared documents. - :param bool identical_text: (optional) Specifies whether the aligned element is - identical. Elements are considered identical despite minor differences such as - leading punctuation, end-of-sentence punctuation, whitespace, the presence or - absence of definite or indefinite articles, and others. - :param list[str] provenance_ids: (optional) One or more hashed values that you can - send to IBM to provide feedback or receive support. - :param bool significant_elements: (optional) Indicates that the elements aligned - are contractual clauses of significance. - """ - self.element_pair = element_pair - self.identical_text = identical_text - self.provenance_ids = provenance_ids - self.significant_elements = significant_elements - - @classmethod - def _from_dict(cls, _dict): - """Initialize a AlignedElement object from a json dictionary.""" - args = {} - if 'element_pair' in _dict: - args['element_pair'] = [ - ElementPair._from_dict(x) for x in (_dict.get('element_pair')) - ] - if 'identical_text' in _dict: - args['identical_text'] = _dict.get('identical_text') - if 'provenance_ids' in _dict: - args['provenance_ids'] = _dict.get('provenance_ids') - if 'significant_elements' in _dict: - args['significant_elements'] = _dict.get('significant_elements') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'element_pair') and self.element_pair is not None: - _dict['element_pair'] = [x._to_dict() for x in self.element_pair] - if hasattr(self, 'identical_text') and self.identical_text is not None: - _dict['identical_text'] = self.identical_text - if hasattr(self, 'provenance_ids') and self.provenance_ids is not None: - _dict['provenance_ids'] = self.provenance_ids - if hasattr(self, 'significant_elements' - ) and self.significant_elements is not None: - _dict['significant_elements'] = self.significant_elements - return _dict - - def __str__(self): - """Return a `str` version of this AlignedElement object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Attribute(object): - """ - List of document attributes. - - :attr str type: (optional) The type of attribute. - :attr str text: (optional) The text associated with the attribute. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. 
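# The model classes in this module share the same serialization pattern, sketched here
# with Address: `_from_dict` builds an object from the service's JSON and `_to_dict`
# converts it back. Both are private helpers used by the SDK itself; the sample values
# below are made up.
address = Address._from_dict({'text': '123 Main Street',
                              'location': {'begin': 241, 'end': 256}})
print(address.text)           # '123 Main Street'
print(address._to_dict())     # round-trips to the original dictionary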
- """ - - def __init__(self, type=None, text=None, location=None): - """ - Initialize a Attribute object. - - :param str type: (optional) The type of attribute. - :param str text: (optional) The text associated with the attribute. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - """ - self.type = type - self.text = text - self.location = location - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Attribute object from a json dictionary.""" - args = {} - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'type') and self.type is not None: - _dict['type'] = self.type - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this Attribute object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class BatchStatus(object): - """ - The batch-request status. - - :attr str function: (optional) The method to be run against the documents. Possible - values are `html_conversion`, `element_classification`, and `tables`. - :attr str input_bucket_location: (optional) The geographical location of the Cloud - Object Storage input bucket as listed on the **Endpoint** tab of your COS instance; - for example, `us-geo`, `eu-geo`, or `ap-geo`. - :attr str input_bucket_name: (optional) The name of the Cloud Object Storage input - bucket. - :attr str output_bucket_location: (optional) The geographical location of the Cloud - Object Storage output bucket as listed on the **Endpoint** tab of your COS instance; - for example, `us-geo`, `eu-geo`, or `ap-geo`. - :attr str output_bucket_name: (optional) The name of the Cloud Object Storage output - bucket. - :attr str batch_id: (optional) The unique identifier for the batch request. - :attr DocCounts document_counts: (optional) Document counts. - :attr str status: (optional) The status of the batch request. - :attr datetime created: (optional) The creation time of the batch request. - :attr datetime updated: (optional) The time of the most recent update to the batch - request. - """ - - def __init__(self, - function=None, - input_bucket_location=None, - input_bucket_name=None, - output_bucket_location=None, - output_bucket_name=None, - batch_id=None, - document_counts=None, - status=None, - created=None, - updated=None): - """ - Initialize a BatchStatus object. - - :param str function: (optional) The method to be run against the documents. - Possible values are `html_conversion`, `element_classification`, and `tables`. 
- :param str input_bucket_location: (optional) The geographical location of the - Cloud Object Storage input bucket as listed on the **Endpoint** tab of your COS - instance; for example, `us-geo`, `eu-geo`, or `ap-geo`. - :param str input_bucket_name: (optional) The name of the Cloud Object Storage - input bucket. - :param str output_bucket_location: (optional) The geographical location of the - Cloud Object Storage output bucket as listed on the **Endpoint** tab of your COS - instance; for example, `us-geo`, `eu-geo`, or `ap-geo`. - :param str output_bucket_name: (optional) The name of the Cloud Object Storage - output bucket. - :param str batch_id: (optional) The unique identifier for the batch request. - :param DocCounts document_counts: (optional) Document counts. - :param str status: (optional) The status of the batch request. - :param datetime created: (optional) The creation time of the batch request. - :param datetime updated: (optional) The time of the most recent update to the - batch request. - """ - self.function = function - self.input_bucket_location = input_bucket_location - self.input_bucket_name = input_bucket_name - self.output_bucket_location = output_bucket_location - self.output_bucket_name = output_bucket_name - self.batch_id = batch_id - self.document_counts = document_counts - self.status = status - self.created = created - self.updated = updated - - @classmethod - def _from_dict(cls, _dict): - """Initialize a BatchStatus object from a json dictionary.""" - args = {} - if 'function' in _dict: - args['function'] = _dict.get('function') - if 'input_bucket_location' in _dict: - args['input_bucket_location'] = _dict.get('input_bucket_location') - if 'input_bucket_name' in _dict: - args['input_bucket_name'] = _dict.get('input_bucket_name') - if 'output_bucket_location' in _dict: - args['output_bucket_location'] = _dict.get('output_bucket_location') - if 'output_bucket_name' in _dict: - args['output_bucket_name'] = _dict.get('output_bucket_name') - if 'batch_id' in _dict: - args['batch_id'] = _dict.get('batch_id') - if 'document_counts' in _dict: - args['document_counts'] = DocCounts._from_dict( - _dict.get('document_counts')) - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'function') and self.function is not None: - _dict['function'] = self.function - if hasattr(self, 'input_bucket_location' - ) and self.input_bucket_location is not None: - _dict['input_bucket_location'] = self.input_bucket_location - if hasattr(self, - 'input_bucket_name') and self.input_bucket_name is not None: - _dict['input_bucket_name'] = self.input_bucket_name - if hasattr(self, 'output_bucket_location' - ) and self.output_bucket_location is not None: - _dict['output_bucket_location'] = self.output_bucket_location - if hasattr( - self, - 'output_bucket_name') and self.output_bucket_name is not None: - _dict['output_bucket_name'] = self.output_bucket_name - if hasattr(self, 'batch_id') and self.batch_id is not None: - _dict['batch_id'] = self.batch_id - if hasattr(self, - 'document_counts') and self.document_counts is not None: - _dict['document_counts'] = self.document_counts._to_dict() - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if 
hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) - return _dict - - def __str__(self): - """Return a `str` version of this BatchStatus object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Batches(object): - """ - The results of a successful `GET /v1/batches` request. - - :attr list[BatchStatus] batches: (optional) A list of the status of all batch - requests. - """ - - def __init__(self, batches=None): - """ - Initialize a Batches object. - - :param list[BatchStatus] batches: (optional) A list of the status of all batch - requests. - """ - self.batches = batches - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Batches object from a json dictionary.""" - args = {} - if 'batches' in _dict: - args['batches'] = [ - BatchStatus._from_dict(x) for x in (_dict.get('batches')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'batches') and self.batches is not None: - _dict['batches'] = [x._to_dict() for x in self.batches] - return _dict - - def __str__(self): - """Return a `str` version of this Batches object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class BodyCells(object): - """ - Cells that are not table header, column header, or row header cells. - - :attr str cell_id: (optional) The unique ID of the cell in the current table. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - :attr str text: (optional) The textual contents of this cell from the input document - without associated markup content. - :attr int row_index_begin: (optional) The `begin` index of this cell's `row` location - in the current table. - :attr int row_index_end: (optional) The `end` index of this cell's `row` location in - the current table. - :attr int column_index_begin: (optional) The `begin` index of this cell's `column` - location in the current table. - :attr int column_index_end: (optional) The `end` index of this cell's `column` - location in the current table. 
- :attr list[RowHeaderIds] row_header_ids: (optional) - :attr list[RowHeaderTexts] row_header_texts: (optional) - :attr list[RowHeaderTextsNormalized] row_header_texts_normalized: (optional) - :attr list[ColumnHeaderIds] column_header_ids: (optional) - :attr list[ColumnHeaderTexts] column_header_texts: (optional) - :attr list[ColumnHeaderTextsNormalized] column_header_texts_normalized: (optional) - :attr list[Attribute] attributes: (optional) - """ - - def __init__(self, - cell_id=None, - location=None, - text=None, - row_index_begin=None, - row_index_end=None, - column_index_begin=None, - column_index_end=None, - row_header_ids=None, - row_header_texts=None, - row_header_texts_normalized=None, - column_header_ids=None, - column_header_texts=None, - column_header_texts_normalized=None, - attributes=None): - """ - Initialize a BodyCells object. - - :param str cell_id: (optional) The unique ID of the cell in the current table. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - :param str text: (optional) The textual contents of this cell from the input - document without associated markup content. - :param int row_index_begin: (optional) The `begin` index of this cell's `row` - location in the current table. - :param int row_index_end: (optional) The `end` index of this cell's `row` location - in the current table. - :param int column_index_begin: (optional) The `begin` index of this cell's - `column` location in the current table. - :param int column_index_end: (optional) The `end` index of this cell's `column` - location in the current table. - :param list[RowHeaderIds] row_header_ids: (optional) - :param list[RowHeaderTexts] row_header_texts: (optional) - :param list[RowHeaderTextsNormalized] row_header_texts_normalized: (optional) - :param list[ColumnHeaderIds] column_header_ids: (optional) - :param list[ColumnHeaderTexts] column_header_texts: (optional) - :param list[ColumnHeaderTextsNormalized] column_header_texts_normalized: - (optional) - :param list[Attribute] attributes: (optional) - """ - self.cell_id = cell_id - self.location = location - self.text = text - self.row_index_begin = row_index_begin - self.row_index_end = row_index_end - self.column_index_begin = column_index_begin - self.column_index_end = column_index_end - self.row_header_ids = row_header_ids - self.row_header_texts = row_header_texts - self.row_header_texts_normalized = row_header_texts_normalized - self.column_header_ids = column_header_ids - self.column_header_texts = column_header_texts - self.column_header_texts_normalized = column_header_texts_normalized - self.attributes = attributes - - @classmethod - def _from_dict(cls, _dict): - """Initialize a BodyCells object from a json dictionary.""" - args = {} - if 'cell_id' in _dict: - args['cell_id'] = _dict.get('cell_id') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'row_index_begin' in _dict: - args['row_index_begin'] = _dict.get('row_index_begin') - if 'row_index_end' in _dict: - args['row_index_end'] = _dict.get('row_index_end') - if 'column_index_begin' in _dict: - args['column_index_begin'] = _dict.get('column_index_begin') - if 'column_index_end' in _dict: - args['column_index_end'] = _dict.get('column_index_end') - if 'row_header_ids' in _dict: - args['row_header_ids'] = [ - RowHeaderIds._from_dict(x) - for x in 
(_dict.get('row_header_ids')) - ] - if 'row_header_texts' in _dict: - args['row_header_texts'] = [ - RowHeaderTexts._from_dict(x) - for x in (_dict.get('row_header_texts')) - ] - if 'row_header_texts_normalized' in _dict: - args['row_header_texts_normalized'] = [ - RowHeaderTextsNormalized._from_dict(x) - for x in (_dict.get('row_header_texts_normalized')) - ] - if 'column_header_ids' in _dict: - args['column_header_ids'] = [ - ColumnHeaderIds._from_dict(x) - for x in (_dict.get('column_header_ids')) - ] - if 'column_header_texts' in _dict: - args['column_header_texts'] = [ - ColumnHeaderTexts._from_dict(x) - for x in (_dict.get('column_header_texts')) - ] - if 'column_header_texts_normalized' in _dict: - args['column_header_texts_normalized'] = [ - ColumnHeaderTextsNormalized._from_dict(x) - for x in (_dict.get('column_header_texts_normalized')) - ] - if 'attributes' in _dict: - args['attributes'] = [ - Attribute._from_dict(x) for x in (_dict.get('attributes')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'cell_id') and self.cell_id is not None: - _dict['cell_id'] = self.cell_id - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, - 'row_index_begin') and self.row_index_begin is not None: - _dict['row_index_begin'] = self.row_index_begin - if hasattr(self, 'row_index_end') and self.row_index_end is not None: - _dict['row_index_end'] = self.row_index_end - if hasattr( - self, - 'column_index_begin') and self.column_index_begin is not None: - _dict['column_index_begin'] = self.column_index_begin - if hasattr(self, - 'column_index_end') and self.column_index_end is not None: - _dict['column_index_end'] = self.column_index_end - if hasattr(self, 'row_header_ids') and self.row_header_ids is not None: - _dict['row_header_ids'] = [ - x._to_dict() for x in self.row_header_ids - ] - if hasattr(self, - 'row_header_texts') and self.row_header_texts is not None: - _dict['row_header_texts'] = [ - x._to_dict() for x in self.row_header_texts - ] - if hasattr(self, 'row_header_texts_normalized' - ) and self.row_header_texts_normalized is not None: - _dict['row_header_texts_normalized'] = [ - x._to_dict() for x in self.row_header_texts_normalized - ] - if hasattr(self, - 'column_header_ids') and self.column_header_ids is not None: - _dict['column_header_ids'] = [ - x._to_dict() for x in self.column_header_ids - ] - if hasattr( - self, - 'column_header_texts') and self.column_header_texts is not None: - _dict['column_header_texts'] = [ - x._to_dict() for x in self.column_header_texts - ] - if hasattr(self, 'column_header_texts_normalized' - ) and self.column_header_texts_normalized is not None: - _dict['column_header_texts_normalized'] = [ - x._to_dict() for x in self.column_header_texts_normalized - ] - if hasattr(self, 'attributes') and self.attributes is not None: - _dict['attributes'] = [x._to_dict() for x in self.attributes] - return _dict - - def __str__(self): - """Return a `str` version of this BodyCells object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false 
otherwise.""" - return not self == other - - -class Category(object): - """ - Information defining an element's subject matter. - - :attr str label: (optional) The category of the associated element. - :attr list[str] provenance_ids: (optional) One or more hashed values that you can send - to IBM to provide feedback or receive support. - """ - - def __init__(self, label=None, provenance_ids=None): - """ - Initialize a Category object. - - :param str label: (optional) The category of the associated element. - :param list[str] provenance_ids: (optional) One or more hashed values that you can - send to IBM to provide feedback or receive support. - """ - self.label = label - self.provenance_ids = provenance_ids - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Category object from a json dictionary.""" - args = {} - if 'label' in _dict: - args['label'] = _dict.get('label') - if 'provenance_ids' in _dict: - args['provenance_ids'] = _dict.get('provenance_ids') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'label') and self.label is not None: - _dict['label'] = self.label - if hasattr(self, 'provenance_ids') and self.provenance_ids is not None: - _dict['provenance_ids'] = self.provenance_ids - return _dict - - def __str__(self): - """Return a `str` version of this Category object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class CategoryComparison(object): - """ - Information defining an element's subject matter. - - :attr str label: (optional) The category of the associated element. - """ - - def __init__(self, label=None): - """ - Initialize a CategoryComparison object. - - :param str label: (optional) The category of the associated element. - """ - self.label = label - - @classmethod - def _from_dict(cls, _dict): - """Initialize a CategoryComparison object from a json dictionary.""" - args = {} - if 'label' in _dict: - args['label'] = _dict.get('label') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'label') and self.label is not None: - _dict['label'] = self.label - return _dict - - def __str__(self): - """Return a `str` version of this CategoryComparison object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ClassifyReturn(object): - """ - The analysis of objects returned by the **Element classification** method. - - :attr Document document: (optional) Basic information about the input document. - :attr str model_id: (optional) The analysis model used to classify the input document. - For the **Element classification** method, the only valid value is `contracts`. - :attr str model_version: (optional) The version of the analysis model identified by - the value of the `model_id` key. 
- :attr list[Element] elements: (optional) Document elements identified by the service. - :attr list[Tables] tables: (optional) Definition of tables identified in the input - document. - :attr DocStructure document_structure: (optional) The structure of the input document. - :attr list[Parties] parties: (optional) Definitions of the parties identified in the - input document. - :attr list[EffectiveDates] effective_dates: (optional) The date or dates on which the - document becomes effective. - :attr list[ContractAmts] contract_amounts: (optional) The monetary amounts that - identify the total amount of the contract that needs to be paid from one party to - another. - :attr list[TerminationDates] termination_dates: (optional) The date or dates on which - the document is to be terminated. - :attr list[ContractType] contract_type: (optional) The document's contract type or - types as declared in the document. - """ - - def __init__(self, - document=None, - model_id=None, - model_version=None, - elements=None, - tables=None, - document_structure=None, - parties=None, - effective_dates=None, - contract_amounts=None, - termination_dates=None, - contract_type=None): - """ - Initialize a ClassifyReturn object. - - :param Document document: (optional) Basic information about the input document. - :param str model_id: (optional) The analysis model used to classify the input - document. For the **Element classification** method, the only valid value is - `contracts`. - :param str model_version: (optional) The version of the analysis model identified - by the value of the `model_id` key. - :param list[Element] elements: (optional) Document elements identified by the - service. - :param list[Tables] tables: (optional) Definition of tables identified in the - input document. - :param DocStructure document_structure: (optional) The structure of the input - document. - :param list[Parties] parties: (optional) Definitions of the parties identified in - the input document. - :param list[EffectiveDates] effective_dates: (optional) The date or dates on which - the document becomes effective. - :param list[ContractAmts] contract_amounts: (optional) The monetary amounts that - identify the total amount of the contract that needs to be paid from one party to - another. - :param list[TerminationDates] termination_dates: (optional) The date or dates on - which the document is to be terminated. - :param list[ContractType] contract_type: (optional) The document's contract type - or types as declared in the document. 
- """ - self.document = document - self.model_id = model_id - self.model_version = model_version - self.elements = elements - self.tables = tables - self.document_structure = document_structure - self.parties = parties - self.effective_dates = effective_dates - self.contract_amounts = contract_amounts - self.termination_dates = termination_dates - self.contract_type = contract_type - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ClassifyReturn object from a json dictionary.""" - args = {} - if 'document' in _dict: - args['document'] = Document._from_dict(_dict.get('document')) - if 'model_id' in _dict: - args['model_id'] = _dict.get('model_id') - if 'model_version' in _dict: - args['model_version'] = _dict.get('model_version') - if 'elements' in _dict: - args['elements'] = [ - Element._from_dict(x) for x in (_dict.get('elements')) - ] - if 'tables' in _dict: - args['tables'] = [ - Tables._from_dict(x) for x in (_dict.get('tables')) - ] - if 'document_structure' in _dict: - args['document_structure'] = DocStructure._from_dict( - _dict.get('document_structure')) - if 'parties' in _dict: - args['parties'] = [ - Parties._from_dict(x) for x in (_dict.get('parties')) - ] - if 'effective_dates' in _dict: - args['effective_dates'] = [ - EffectiveDates._from_dict(x) - for x in (_dict.get('effective_dates')) - ] - if 'contract_amounts' in _dict: - args['contract_amounts'] = [ - ContractAmts._from_dict(x) - for x in (_dict.get('contract_amounts')) - ] - if 'termination_dates' in _dict: - args['termination_dates'] = [ - TerminationDates._from_dict(x) - for x in (_dict.get('termination_dates')) - ] - if 'contract_type' in _dict: - args['contract_type'] = [ - ContractType._from_dict(x) for x in (_dict.get('contract_type')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document') and self.document is not None: - _dict['document'] = self.document._to_dict() - if hasattr(self, 'model_id') and self.model_id is not None: - _dict['model_id'] = self.model_id - if hasattr(self, 'model_version') and self.model_version is not None: - _dict['model_version'] = self.model_version - if hasattr(self, 'elements') and self.elements is not None: - _dict['elements'] = [x._to_dict() for x in self.elements] - if hasattr(self, 'tables') and self.tables is not None: - _dict['tables'] = [x._to_dict() for x in self.tables] - if hasattr( - self, - 'document_structure') and self.document_structure is not None: - _dict['document_structure'] = self.document_structure._to_dict() - if hasattr(self, 'parties') and self.parties is not None: - _dict['parties'] = [x._to_dict() for x in self.parties] - if hasattr(self, - 'effective_dates') and self.effective_dates is not None: - _dict['effective_dates'] = [ - x._to_dict() for x in self.effective_dates - ] - if hasattr(self, - 'contract_amounts') and self.contract_amounts is not None: - _dict['contract_amounts'] = [ - x._to_dict() for x in self.contract_amounts - ] - if hasattr(self, - 'termination_dates') and self.termination_dates is not None: - _dict['termination_dates'] = [ - x._to_dict() for x in self.termination_dates - ] - if hasattr(self, 'contract_type') and self.contract_type is not None: - _dict['contract_type'] = [x._to_dict() for x in self.contract_type] - return _dict - - def __str__(self): - """Return a `str` version of this ClassifyReturn object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other 
are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ColumnHeaderIds(object): - """ - An array of values, each being the `id` value of a column header that is applicable to - the current cell. - - :attr str id: (optional) The `id` value of a column header. - """ - - def __init__(self, id=None): - """ - Initialize a ColumnHeaderIds object. - - :param str id: (optional) The `id` value of a column header. - """ - self.id = id - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ColumnHeaderIds object from a json dictionary.""" - args = {} - if 'id' in _dict: - args['id'] = _dict.get('id') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'id') and self.id is not None: - _dict['id'] = self.id - return _dict - - def __str__(self): - """Return a `str` version of this ColumnHeaderIds object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ColumnHeaderTexts(object): - """ - An array of values, each being the `text` value of a column header that is applicable - to the current cell. - - :attr str text: (optional) The `text` value of a column header. - """ - - def __init__(self, text=None): - """ - Initialize a ColumnHeaderTexts object. - - :param str text: (optional) The `text` value of a column header. - """ - self.text = text - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ColumnHeaderTexts object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - return _dict - - def __str__(self): - """Return a `str` version of this ColumnHeaderTexts object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ColumnHeaderTextsNormalized(object): - """ - If you provide customization input, the normalized version of the column header texts - according to the customization; otherwise, the same value as `column_header_texts`. - - :attr str text_normalized: (optional) The normalized version of a column header text. - """ - - def __init__(self, text_normalized=None): - """ - Initialize a ColumnHeaderTextsNormalized object. - - :param str text_normalized: (optional) The normalized version of a column header - text. 
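# A sketch of consuming an element-classification result through the ClassifyReturn
# model above. `result` is assumed to be the JSON dict returned by the
# element-classification call defined earlier in this module, and the `categories`
# attribute comes from the Element model defined elsewhere in this file.
analysis = ClassifyReturn._from_dict(result)
for element in analysis.elements or []:
    labels = [c.label for c in (element.categories or [])]
    print(element.text[:60], labels)
print('tables:', len(analysis.tables or []),
      'parties:', len(analysis.parties or []))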
- """ - self.text_normalized = text_normalized - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ColumnHeaderTextsNormalized object from a json dictionary.""" - args = {} - if 'text_normalized' in _dict: - args['text_normalized'] = _dict.get('text_normalized') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'text_normalized') and self.text_normalized is not None: - _dict['text_normalized'] = self.text_normalized - return _dict - - def __str__(self): - """Return a `str` version of this ColumnHeaderTextsNormalized object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ColumnHeaders(object): - """ - Column-level cells, each applicable as a header to other cells in the same column as - itself, of the current table. - - :attr str cell_id: (optional) The unique ID of the cell in the current table. - :attr object location: (optional) The location of the column header cell in the - current table as defined by its `begin` and `end` offsets, respectfully, in the input - document. - :attr str text: (optional) The textual contents of this cell from the input document - without associated markup content. - :attr str text_normalized: (optional) If you provide customization input, the - normalized version of the cell text according to the customization; otherwise, the - same value as `text`. - :attr int row_index_begin: (optional) The `begin` index of this cell's `row` location - in the current table. - :attr int row_index_end: (optional) The `end` index of this cell's `row` location in - the current table. - :attr int column_index_begin: (optional) The `begin` index of this cell's `column` - location in the current table. - :attr int column_index_end: (optional) The `end` index of this cell's `column` - location in the current table. - """ - - def __init__(self, - cell_id=None, - location=None, - text=None, - text_normalized=None, - row_index_begin=None, - row_index_end=None, - column_index_begin=None, - column_index_end=None): - """ - Initialize a ColumnHeaders object. - - :param str cell_id: (optional) The unique ID of the cell in the current table. - :param object location: (optional) The location of the column header cell in the - current table as defined by its `begin` and `end` offsets, respectfully, in the - input document. - :param str text: (optional) The textual contents of this cell from the input - document without associated markup content. - :param str text_normalized: (optional) If you provide customization input, the - normalized version of the cell text according to the customization; otherwise, the - same value as `text`. - :param int row_index_begin: (optional) The `begin` index of this cell's `row` - location in the current table. - :param int row_index_end: (optional) The `end` index of this cell's `row` location - in the current table. - :param int column_index_begin: (optional) The `begin` index of this cell's - `column` location in the current table. - :param int column_index_end: (optional) The `end` index of this cell's `column` - location in the current table. 
- """ - self.cell_id = cell_id - self.location = location - self.text = text - self.text_normalized = text_normalized - self.row_index_begin = row_index_begin - self.row_index_end = row_index_end - self.column_index_begin = column_index_begin - self.column_index_end = column_index_end - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ColumnHeaders object from a json dictionary.""" - args = {} - if 'cell_id' in _dict: - args['cell_id'] = _dict.get('cell_id') - if 'location' in _dict: - args['location'] = _dict.get('location') - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'text_normalized' in _dict: - args['text_normalized'] = _dict.get('text_normalized') - if 'row_index_begin' in _dict: - args['row_index_begin'] = _dict.get('row_index_begin') - if 'row_index_end' in _dict: - args['row_index_end'] = _dict.get('row_index_end') - if 'column_index_begin' in _dict: - args['column_index_begin'] = _dict.get('column_index_begin') - if 'column_index_end' in _dict: - args['column_index_end'] = _dict.get('column_index_end') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'cell_id') and self.cell_id is not None: - _dict['cell_id'] = self.cell_id - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, - 'text_normalized') and self.text_normalized is not None: - _dict['text_normalized'] = self.text_normalized - if hasattr(self, - 'row_index_begin') and self.row_index_begin is not None: - _dict['row_index_begin'] = self.row_index_begin - if hasattr(self, 'row_index_end') and self.row_index_end is not None: - _dict['row_index_end'] = self.row_index_end - if hasattr( - self, - 'column_index_begin') and self.column_index_begin is not None: - _dict['column_index_begin'] = self.column_index_begin - if hasattr(self, - 'column_index_end') and self.column_index_end is not None: - _dict['column_index_end'] = self.column_index_end - return _dict - - def __str__(self): - """Return a `str` version of this ColumnHeaders object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class CompareReturn(object): - """ - The comparison of the two submitted documents. - - :attr str model_id: (optional) The analysis model used to compare the input documents. - For the **Compare two documents** method, the only valid value is `contracts`. - :attr str model_version: (optional) The version of the analysis model identified by - the value of the `model_id` key. - :attr list[Document] documents: (optional) Information about the documents being - compared. - :attr list[AlignedElement] aligned_elements: (optional) A list of pairs of elements - that semantically align between the compared documents. - :attr list[UnalignedElement] unaligned_elements: (optional) A list of elements that do - not semantically align between the compared documents. - """ - - def __init__(self, - model_id=None, - model_version=None, - documents=None, - aligned_elements=None, - unaligned_elements=None): - """ - Initialize a CompareReturn object. 
- - :param str model_id: (optional) The analysis model used to compare the input - documents. For the **Compare two documents** method, the only valid value is - `contracts`. - :param str model_version: (optional) The version of the analysis model identified - by the value of the `model_id` key. - :param list[Document] documents: (optional) Information about the documents being - compared. - :param list[AlignedElement] aligned_elements: (optional) A list of pairs of - elements that semantically align between the compared documents. - :param list[UnalignedElement] unaligned_elements: (optional) A list of elements - that do not semantically align between the compared documents. - """ - self.model_id = model_id - self.model_version = model_version - self.documents = documents - self.aligned_elements = aligned_elements - self.unaligned_elements = unaligned_elements - - @classmethod - def _from_dict(cls, _dict): - """Initialize a CompareReturn object from a json dictionary.""" - args = {} - if 'model_id' in _dict: - args['model_id'] = _dict.get('model_id') - if 'model_version' in _dict: - args['model_version'] = _dict.get('model_version') - if 'documents' in _dict: - args['documents'] = [ - Document._from_dict(x) for x in (_dict.get('documents')) - ] - if 'aligned_elements' in _dict: - args['aligned_elements'] = [ - AlignedElement._from_dict(x) - for x in (_dict.get('aligned_elements')) - ] - if 'unaligned_elements' in _dict: - args['unaligned_elements'] = [ - UnalignedElement._from_dict(x) - for x in (_dict.get('unaligned_elements')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'model_id') and self.model_id is not None: - _dict['model_id'] = self.model_id - if hasattr(self, 'model_version') and self.model_version is not None: - _dict['model_version'] = self.model_version - if hasattr(self, 'documents') and self.documents is not None: - _dict['documents'] = [x._to_dict() for x in self.documents] - if hasattr(self, - 'aligned_elements') and self.aligned_elements is not None: - _dict['aligned_elements'] = [ - x._to_dict() for x in self.aligned_elements - ] - if hasattr( - self, - 'unaligned_elements') and self.unaligned_elements is not None: - _dict['unaligned_elements'] = [ - x._to_dict() for x in self.unaligned_elements - ] - return _dict - - def __str__(self): - """Return a `str` version of this CompareReturn object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Contact(object): - """ - A contact. - - :attr str name: (optional) A string listing the name of the contact. - :attr str role: (optional) A string listing the role of the contact. - """ - - def __init__(self, name=None, role=None): - """ - Initialize a Contact object. - - :param str name: (optional) A string listing the name of the contact. - :param str role: (optional) A string listing the role of the contact. 
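# A sketch of consuming a document-comparison result through the CompareReturn model
# above. `result` is assumed to be the JSON dict returned by the compare-documents call
# defined earlier in this module.
comparison = CompareReturn._from_dict(result)
print('aligned elements:', len(comparison.aligned_elements or []))
print('unaligned elements:', len(comparison.unaligned_elements or []))
for aligned in (comparison.aligned_elements or [])[:3]:
    # `identical_text` and `significant_elements` are described in AlignedElement above.
    print(aligned.identical_text, aligned.significant_elements)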
- """ - self.name = name - self.role = role - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Contact object from a json dictionary.""" - args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') - if 'role' in _dict: - args['role'] = _dict.get('role') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'role') and self.role is not None: - _dict['role'] = self.role - return _dict - - def __str__(self): - """Return a `str` version of this Contact object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ContractAmts(object): - """ - A monetary amount identified in the input document. - - :attr str text: (optional) The monetary amount. - :attr str confidence_level: (optional) The confidence level in the identification of - the contract amount. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - """ - - def __init__(self, text=None, confidence_level=None, location=None): - """ - Initialize a ContractAmts object. - - :param str text: (optional) The monetary amount. - :param str confidence_level: (optional) The confidence level in the identification - of the contract amount. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - """ - self.text = text - self.confidence_level = confidence_level - self.location = location - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ContractAmts object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'confidence_level' in _dict: - args['confidence_level'] = _dict.get('confidence_level') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, - 'confidence_level') and self.confidence_level is not None: - _dict['confidence_level'] = self.confidence_level - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this ContractAmts object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ContractType(object): - """ - The contract type identified in the input document. - - :attr str text: (optional) The contract type. - :attr str confidence_level: (optional) The confidence level in the identification of - the termination date. 
- :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - """ - - def __init__(self, text=None, confidence_level=None, location=None): - """ - Initialize a ContractType object. - - :param str text: (optional) The contract type. - :param str confidence_level: (optional) The confidence level in the identification - of the contract type. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - """ - self.text = text - self.confidence_level = confidence_level - self.location = location - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ContractType object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'confidence_level' in _dict: - args['confidence_level'] = _dict.get('confidence_level') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, - 'confidence_level') and self.confidence_level is not None: - _dict['confidence_level'] = self.confidence_level - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this ContractType object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DocCounts(object): - """ - Document counts. - - :attr int total: (optional) Total number of documents. - :attr int pending: (optional) Number of pending documents. - :attr int successful: (optional) Number of documents successfully processed. - :attr int failed: (optional) Number of documents not successfully processed. - """ - - def __init__(self, total=None, pending=None, successful=None, failed=None): - """ - Initialize a DocCounts object. - - :param int total: (optional) Total number of documents. - :param int pending: (optional) Number of pending documents. - :param int successful: (optional) Number of documents successfully processed. - :param int failed: (optional) Number of documents not successfully processed.
- """ - self.total = total - self.pending = pending - self.successful = successful - self.failed = failed - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DocCounts object from a json dictionary.""" - args = {} - if 'total' in _dict: - args['total'] = _dict.get('total') - if 'pending' in _dict: - args['pending'] = _dict.get('pending') - if 'successful' in _dict: - args['successful'] = _dict.get('successful') - if 'failed' in _dict: - args['failed'] = _dict.get('failed') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'total') and self.total is not None: - _dict['total'] = self.total - if hasattr(self, 'pending') and self.pending is not None: - _dict['pending'] = self.pending - if hasattr(self, 'successful') and self.successful is not None: - _dict['successful'] = self.successful - if hasattr(self, 'failed') and self.failed is not None: - _dict['failed'] = self.failed - return _dict - - def __str__(self): - """Return a `str` version of this DocCounts object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DocInfo(object): - """ - Information about the parsed input document. - - :attr str html: (optional) The full text of the parsed document in HTML format. - :attr str title: (optional) The title of the parsed document. If the service did not - detect a title, the value of this element is `null`. - :attr str hash: (optional) The MD5 hash of the input document. - """ - - def __init__(self, html=None, title=None, hash=None): - """ - Initialize a DocInfo object. - - :param str html: (optional) The full text of the parsed document in HTML format. - :param str title: (optional) The title of the parsed document. If the service did - not detect a title, the value of this element is `null`. - :param str hash: (optional) The MD5 hash of the input document. - """ - self.html = html - self.title = title - self.hash = hash - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DocInfo object from a json dictionary.""" - args = {} - if 'html' in _dict: - args['html'] = _dict.get('html') - if 'title' in _dict: - args['title'] = _dict.get('title') - if 'hash' in _dict: - args['hash'] = _dict.get('hash') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'html') and self.html is not None: - _dict['html'] = self.html - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, 'hash') and self.hash is not None: - _dict['hash'] = self.hash - return _dict - - def __str__(self): - """Return a `str` version of this DocInfo object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DocStructure(object): - """ - The structure of the input document. 
- - :attr list[SectionTitles] section_titles: (optional) An array containing one object - per section or subsection identified in the input document. - :attr list[LeadingSentence] leading_sentences: (optional) An array containing one - object per section or subsection, in parallel with the `section_titles` array, that - details the leading sentences in the corresponding section or subsection. - """ - - def __init__(self, section_titles=None, leading_sentences=None): - """ - Initialize a DocStructure object. - - :param list[SectionTitles] section_titles: (optional) An array containing one - object per section or subsection identified in the input document. - :param list[LeadingSentence] leading_sentences: (optional) An array containing one - object per section or subsection, in parallel with the `section_titles` array, - that details the leading sentences in the corresponding section or subsection. - """ - self.section_titles = section_titles - self.leading_sentences = leading_sentences - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DocStructure object from a json dictionary.""" - args = {} - if 'section_titles' in _dict: - args['section_titles'] = [ - SectionTitles._from_dict(x) - for x in (_dict.get('section_titles')) - ] - if 'leading_sentences' in _dict: - args['leading_sentences'] = [ - LeadingSentence._from_dict(x) - for x in (_dict.get('leading_sentences')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'section_titles') and self.section_titles is not None: - _dict['section_titles'] = [ - x._to_dict() for x in self.section_titles - ] - if hasattr(self, - 'leading_sentences') and self.leading_sentences is not None: - _dict['leading_sentences'] = [ - x._to_dict() for x in self.leading_sentences - ] - return _dict - - def __str__(self): - """Return a `str` version of this DocStructure object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Document(object): - """ - Basic information about the input document. - - :attr str title: (optional) Document title, if detected. - :attr str html: (optional) The input document converted into HTML format. - :attr str hash: (optional) The MD5 hash value of the input document. - :attr str label: (optional) The label applied to the input document with the calling - method's `file_1_label` or `file_2_label` value. This field is specified only in the - output of the **Comparing two documents** method. - """ - - def __init__(self, title=None, html=None, hash=None, label=None): - """ - Initialize a Document object. - - :param str title: (optional) Document title, if detected. - :param str html: (optional) The input document converted into HTML format. - :param str hash: (optional) The MD5 hash value of the input document. - :param str label: (optional) The label applied to the input document with the - calling method's `file_1_label` or `file_2_label` value. This field is specified - only in the output of the **Comparing two documents** method. 
- """ - self.title = title - self.html = html - self.hash = hash - self.label = label - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Document object from a json dictionary.""" - args = {} - if 'title' in _dict: - args['title'] = _dict.get('title') - if 'html' in _dict: - args['html'] = _dict.get('html') - if 'hash' in _dict: - args['hash'] = _dict.get('hash') - if 'label' in _dict: - args['label'] = _dict.get('label') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, 'html') and self.html is not None: - _dict['html'] = self.html - if hasattr(self, 'hash') and self.hash is not None: - _dict['hash'] = self.hash - if hasattr(self, 'label') and self.label is not None: - _dict['label'] = self.label - return _dict - - def __str__(self): - """Return a `str` version of this Document object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class EffectiveDates(object): - """ - An effective date. - - :attr str text: (optional) The effective date, listed as a string. - :attr str confidence_level: (optional) The confidence level in the identification of - the effective date. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - """ - - def __init__(self, text=None, confidence_level=None, location=None): - """ - Initialize a EffectiveDates object. - - :param str text: (optional) The effective date, listed as a string. - :param str confidence_level: (optional) The confidence level in the identification - of the effective date. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. 
- """ - self.text = text - self.confidence_level = confidence_level - self.location = location - - @classmethod - def _from_dict(cls, _dict): - """Initialize a EffectiveDates object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'confidence_level' in _dict: - args['confidence_level'] = _dict.get('confidence_level') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, - 'confidence_level') and self.confidence_level is not None: - _dict['confidence_level'] = self.confidence_level - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this EffectiveDates object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Element(object): - """ - A component part of the document. - - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - :attr str text: (optional) The text of the element. - :attr list[TypeLabel] types: (optional) Description of the action specified by the - element and whom it affects. - :attr list[Category] categories: (optional) List of functional categories into which - the element falls; in other words, the subject matter of the element. - :attr list[Attribute] attributes: (optional) List of document attributes. - """ - - def __init__(self, - location=None, - text=None, - types=None, - categories=None, - attributes=None): - """ - Initialize a Element object. - - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - :param str text: (optional) The text of the element. - :param list[TypeLabel] types: (optional) Description of the action specified by - the element and whom it affects. - :param list[Category] categories: (optional) List of functional categories into - which the element falls; in other words, the subject matter of the element. - :param list[Attribute] attributes: (optional) List of document attributes. 
- """ - self.location = location - self.text = text - self.types = types - self.categories = categories - self.attributes = attributes - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Element object from a json dictionary.""" - args = {} - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'types' in _dict: - args['types'] = [ - TypeLabel._from_dict(x) for x in (_dict.get('types')) - ] - if 'categories' in _dict: - args['categories'] = [ - Category._from_dict(x) for x in (_dict.get('categories')) - ] - if 'attributes' in _dict: - args['attributes'] = [ - Attribute._from_dict(x) for x in (_dict.get('attributes')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'types') and self.types is not None: - _dict['types'] = [x._to_dict() for x in self.types] - if hasattr(self, 'categories') and self.categories is not None: - _dict['categories'] = [x._to_dict() for x in self.categories] - if hasattr(self, 'attributes') and self.attributes is not None: - _dict['attributes'] = [x._to_dict() for x in self.attributes] - return _dict - - def __str__(self): - """Return a `str` version of this Element object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ElementLocations(object): - """ - A list of `begin` and `end` indexes that indicate the locations of the elements in the - input document. - - :attr int begin: (optional) An integer that indicates the starting position of the - element in the input document. - :attr int end: (optional) An integer that indicates the ending position of the element - in the input document. - """ - - def __init__(self, begin=None, end=None): - """ - Initialize a ElementLocations object. - - :param int begin: (optional) An integer that indicates the starting position of - the element in the input document. - :param int end: (optional) An integer that indicates the ending position of the - element in the input document. 
- """ - self.begin = begin - self.end = end - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ElementLocations object from a json dictionary.""" - args = {} - if 'begin' in _dict: - args['begin'] = _dict.get('begin') - if 'end' in _dict: - args['end'] = _dict.get('end') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'begin') and self.begin is not None: - _dict['begin'] = self.begin - if hasattr(self, 'end') and self.end is not None: - _dict['end'] = self.end - return _dict - - def __str__(self): - """Return a `str` version of this ElementLocations object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ElementPair(object): - """ - Details of semantically aligned elements. - - :attr str document_label: (optional) The label of the document (that is, the value of - either the `file_1_label` or `file_2_label` parameters) in which the element occurs. - :attr str text: (optional) The contents of the element. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - :attr list[TypeLabelComparison] types: (optional) Description of the action specified - by the element and whom it affects. - :attr list[CategoryComparison] categories: (optional) List of functional categories - into which the element falls; in other words, the subject matter of the element. - :attr list[Attribute] attributes: (optional) List of document attributes. - """ - - def __init__(self, - document_label=None, - text=None, - location=None, - types=None, - categories=None, - attributes=None): - """ - Initialize a ElementPair object. - - :param str document_label: (optional) The label of the document (that is, the - value of either the `file_1_label` or `file_2_label` parameters) in which the - element occurs. - :param str text: (optional) The contents of the element. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - :param list[TypeLabelComparison] types: (optional) Description of the action - specified by the element and whom it affects. - :param list[CategoryComparison] categories: (optional) List of functional - categories into which the element falls; in other words, the subject matter of the - element. - :param list[Attribute] attributes: (optional) List of document attributes. 
- """ - self.document_label = document_label - self.text = text - self.location = location - self.types = types - self.categories = categories - self.attributes = attributes - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ElementPair object from a json dictionary.""" - args = {} - if 'document_label' in _dict: - args['document_label'] = _dict.get('document_label') - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - if 'types' in _dict: - args['types'] = [ - TypeLabelComparison._from_dict(x) for x in (_dict.get('types')) - ] - if 'categories' in _dict: - args['categories'] = [ - CategoryComparison._from_dict(x) - for x in (_dict.get('categories')) - ] - if 'attributes' in _dict: - args['attributes'] = [ - Attribute._from_dict(x) for x in (_dict.get('attributes')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document_label') and self.document_label is not None: - _dict['document_label'] = self.document_label - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - if hasattr(self, 'types') and self.types is not None: - _dict['types'] = [x._to_dict() for x in self.types] - if hasattr(self, 'categories') and self.categories is not None: - _dict['categories'] = [x._to_dict() for x in self.categories] - if hasattr(self, 'attributes') and self.attributes is not None: - _dict['attributes'] = [x._to_dict() for x in self.attributes] - return _dict - - def __str__(self): - """Return a `str` version of this ElementPair object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class FeedbackDataInput(object): - """ - Feedback data for submission. - - :attr str feedback_type: The type of feedback. The only permitted value is - `element_classification`. - :attr ShortDoc document: (optional) Brief information about the input document. - :attr str model_id: (optional) An optional string identifying the model ID. The only - permitted value is `contracts`. - :attr str model_version: (optional) An optional string identifying the version of the - model used. - :attr Location location: The numeric location of the identified element in the - document, represented with two integers labeled `begin` and `end`. - :attr str text: The text on which to submit feedback. - :attr OriginalLabelsIn original_labels: The original labeling from the input document, - without the submitted feedback. - :attr UpdatedLabelsIn updated_labels: The updated labeling from the input document, - accounting for the submitted feedback. - """ - - def __init__(self, - feedback_type, - location, - text, - original_labels, - updated_labels, - document=None, - model_id=None, - model_version=None): - """ - Initialize a FeedbackDataInput object. - - :param str feedback_type: The type of feedback. The only permitted value is - `element_classification`. 
- :param Location location: The numeric location of the identified element in the
- document, represented with two integers labeled `begin` and `end`.
- :param str text: The text on which to submit feedback.
- :param OriginalLabelsIn original_labels: The original labeling from the input
- document, without the submitted feedback.
- :param UpdatedLabelsIn updated_labels: The updated labeling from the input
- document, accounting for the submitted feedback.
- :param ShortDoc document: (optional) Brief information about the input document.
- :param str model_id: (optional) An optional string identifying the model ID. The
- only permitted value is `contracts`.
- :param str model_version: (optional) An optional string identifying the version of
- the model used.
- """
- self.feedback_type = feedback_type
- self.document = document
- self.model_id = model_id
- self.model_version = model_version
- self.location = location
- self.text = text
- self.original_labels = original_labels
- self.updated_labels = updated_labels
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a FeedbackDataInput object from a json dictionary."""
- args = {}
- if 'feedback_type' in _dict:
- args['feedback_type'] = _dict.get('feedback_type')
- else:
- raise ValueError(
- 'Required property \'feedback_type\' not present in FeedbackDataInput JSON'
- )
- if 'document' in _dict:
- args['document'] = ShortDoc._from_dict(_dict.get('document'))
- if 'model_id' in _dict:
- args['model_id'] = _dict.get('model_id')
- if 'model_version' in _dict:
- args['model_version'] = _dict.get('model_version')
- if 'location' in _dict:
- args['location'] = Location._from_dict(_dict.get('location'))
- else:
- raise ValueError(
- 'Required property \'location\' not present in FeedbackDataInput JSON'
- )
- if 'text' in _dict:
- args['text'] = _dict.get('text')
- else:
- raise ValueError(
- 'Required property \'text\' not present in FeedbackDataInput JSON'
- )
- if 'original_labels' in _dict:
- args['original_labels'] = OriginalLabelsIn._from_dict(
- _dict.get('original_labels'))
- else:
- raise ValueError(
- 'Required property \'original_labels\' not present in FeedbackDataInput JSON'
- )
- if 'updated_labels' in _dict:
- args['updated_labels'] = UpdatedLabelsIn._from_dict(
- _dict.get('updated_labels'))
- else:
- raise ValueError(
- 'Required property \'updated_labels\' not present in FeedbackDataInput JSON'
- )
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'feedback_type') and self.feedback_type is not None:
- _dict['feedback_type'] = self.feedback_type
- if hasattr(self, 'document') and self.document is not None:
- _dict['document'] = self.document._to_dict()
- if hasattr(self, 'model_id') and self.model_id is not None:
- _dict['model_id'] = self.model_id
- if hasattr(self, 'model_version') and self.model_version is not None:
- _dict['model_version'] = self.model_version
- if hasattr(self, 'location') and self.location is not None:
- _dict['location'] = self.location._to_dict()
- if hasattr(self, 'text') and self.text is not None:
- _dict['text'] = self.text
- if hasattr(self,
- 'original_labels') and self.original_labels is not None:
- _dict['original_labels'] = self.original_labels._to_dict()
- if hasattr(self, 'updated_labels') and self.updated_labels is not None:
- _dict['updated_labels'] = self.updated_labels._to_dict()
- return _dict
-
- def __str__(self):
- """Return a `str` version of this FeedbackDataInput object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class FeedbackDataOutput(object):
- """
- Information returned from the `POST /v1/feedback` method.
-
- :attr str feedback_type: (optional) A string identifying the user adding the feedback.
- The only permitted value is `element_classification`.
- :attr ShortDoc document: (optional) Brief information about the input document.
- :attr str model_id: (optional) An optional string identifying the model ID. The only
- permitted value is `contracts`.
- :attr str model_version: (optional) An optional string identifying the version of the
- model used.
- :attr Location location: (optional) The numeric location of the identified element in
- the document, represented with two integers labeled `begin` and `end`.
- :attr str text: (optional) The text to which the feedback applies.
- :attr OriginalLabelsOut original_labels: (optional) The original labeling from the
- input document, without the submitted feedback.
- :attr UpdatedLabelsOut updated_labels: (optional) The updated labeling from the input
- document, accounting for the submitted feedback.
- :attr Pagination pagination: (optional) Pagination details, if required by the length
- of the output.
- """
-
- def __init__(self,
- feedback_type=None,
- document=None,
- model_id=None,
- model_version=None,
- location=None,
- text=None,
- original_labels=None,
- updated_labels=None,
- pagination=None):
- """
- Initialize a FeedbackDataOutput object.
-
- :param str feedback_type: (optional) A string identifying the user adding the
- feedback. The only permitted value is `element_classification`.
- :param ShortDoc document: (optional) Brief information about the input document.
- :param str model_id: (optional) An optional string identifying the model ID. The
- only permitted value is `contracts`.
- :param str model_version: (optional) An optional string identifying the version of
- the model used.
- :param Location location: (optional) The numeric location of the identified
- element in the document, represented with two integers labeled `begin` and `end`.
- :param str text: (optional) The text to which the feedback applies.
- :param OriginalLabelsOut original_labels: (optional) The original labeling from
- the input document, without the submitted feedback.
- :param UpdatedLabelsOut updated_labels: (optional) The updated labeling from the
- input document, accounting for the submitted feedback.
- :param Pagination pagination: (optional) Pagination details, if required by the
- length of the output.
- """ - self.feedback_type = feedback_type - self.document = document - self.model_id = model_id - self.model_version = model_version - self.location = location - self.text = text - self.original_labels = original_labels - self.updated_labels = updated_labels - self.pagination = pagination - - @classmethod - def _from_dict(cls, _dict): - """Initialize a FeedbackDataOutput object from a json dictionary.""" - args = {} - if 'feedback_type' in _dict: - args['feedback_type'] = _dict.get('feedback_type') - if 'document' in _dict: - args['document'] = ShortDoc._from_dict(_dict.get('document')) - if 'model_id' in _dict: - args['model_id'] = _dict.get('model_id') - if 'model_version' in _dict: - args['model_version'] = _dict.get('model_version') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'original_labels' in _dict: - args['original_labels'] = OriginalLabelsOut._from_dict( - _dict.get('original_labels')) - if 'updated_labels' in _dict: - args['updated_labels'] = UpdatedLabelsOut._from_dict( - _dict.get('updated_labels')) - if 'pagination' in _dict: - args['pagination'] = Pagination._from_dict(_dict.get('pagination')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'feedback_type') and self.feedback_type is not None: - _dict['feedback_type'] = self.feedback_type - if hasattr(self, 'document') and self.document is not None: - _dict['document'] = self.document._to_dict() - if hasattr(self, 'model_id') and self.model_id is not None: - _dict['model_id'] = self.model_id - if hasattr(self, 'model_version') and self.model_version is not None: - _dict['model_version'] = self.model_version - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, - 'original_labels') and self.original_labels is not None: - _dict['original_labels'] = self.original_labels._to_dict() - if hasattr(self, 'updated_labels') and self.updated_labels is not None: - _dict['updated_labels'] = self.updated_labels._to_dict() - if hasattr(self, 'pagination') and self.pagination is not None: - _dict['pagination'] = self.pagination._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this FeedbackDataOutput object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class FeedbackDeleted(object): - """ - The status and message of the deletion request. - - :attr int status: (optional) HTTP return code. - :attr str message: (optional) Status message returned from the service. - """ - - def __init__(self, status=None, message=None): - """ - Initialize a FeedbackDeleted object. - - :param int status: (optional) HTTP return code. - :param str message: (optional) Status message returned from the service. 
- """ - self.status = status - self.message = message - - @classmethod - def _from_dict(cls, _dict): - """Initialize a FeedbackDeleted object from a json dictionary.""" - args = {} - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'message' in _dict: - args['message'] = _dict.get('message') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'message') and self.message is not None: - _dict['message'] = self.message - return _dict - - def __str__(self): - """Return a `str` version of this FeedbackDeleted object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class FeedbackList(object): - """ - The results of a successful `GET /v1/feedback` request. - - :attr list[GetFeedback] feedback: (optional) A list of all feedback for the document. - """ - - def __init__(self, feedback=None): - """ - Initialize a FeedbackList object. - - :param list[GetFeedback] feedback: (optional) A list of all feedback for the - document. - """ - self.feedback = feedback - - @classmethod - def _from_dict(cls, _dict): - """Initialize a FeedbackList object from a json dictionary.""" - args = {} - if 'feedback' in _dict: - args['feedback'] = [ - GetFeedback._from_dict(x) for x in (_dict.get('feedback')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'feedback') and self.feedback is not None: - _dict['feedback'] = [x._to_dict() for x in self.feedback] - return _dict - - def __str__(self): - """Return a `str` version of this FeedbackList object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class FeedbackReturn(object): - """ - Information about the document and the submitted feedback. - - :attr str feedback_id: (optional) The unique ID of the feedback object. - :attr str user_id: (optional) An optional string identifying the person submitting - feedback. - :attr str comment: (optional) An optional comment from the person submitting the - feedback. - :attr datetime created: (optional) Timestamp listing the creation time of the feedback - submission. - :attr FeedbackDataOutput feedback_data: (optional) Information returned from the `POST - /v1/feedback` method. - """ - - def __init__(self, - feedback_id=None, - user_id=None, - comment=None, - created=None, - feedback_data=None): - """ - Initialize a FeedbackReturn object. - - :param str feedback_id: (optional) The unique ID of the feedback object. - :param str user_id: (optional) An optional string identifying the person - submitting feedback. - :param str comment: (optional) An optional comment from the person submitting the - feedback. 
- :param datetime created: (optional) Timestamp listing the creation time of the - feedback submission. - :param FeedbackDataOutput feedback_data: (optional) Information returned from the - `POST /v1/feedback` method. - """ - self.feedback_id = feedback_id - self.user_id = user_id - self.comment = comment - self.created = created - self.feedback_data = feedback_data - - @classmethod - def _from_dict(cls, _dict): - """Initialize a FeedbackReturn object from a json dictionary.""" - args = {} - if 'feedback_id' in _dict: - args['feedback_id'] = _dict.get('feedback_id') - if 'user_id' in _dict: - args['user_id'] = _dict.get('user_id') - if 'comment' in _dict: - args['comment'] = _dict.get('comment') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'feedback_data' in _dict: - args['feedback_data'] = FeedbackDataOutput._from_dict( - _dict.get('feedback_data')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'feedback_id') and self.feedback_id is not None: - _dict['feedback_id'] = self.feedback_id - if hasattr(self, 'user_id') and self.user_id is not None: - _dict['user_id'] = self.user_id - if hasattr(self, 'comment') and self.comment is not None: - _dict['comment'] = self.comment - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'feedback_data') and self.feedback_data is not None: - _dict['feedback_data'] = self.feedback_data._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this FeedbackReturn object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class GetFeedback(object): - """ - The results of a single feedback query. - - :attr str feedback_id: (optional) A string uniquely identifying the feedback entry. - :attr datetime created: (optional) A timestamp identifying the creation time of the - feedback entry. - :attr str comment: (optional) A string containing the user's comment about the - feedback entry. - :attr FeedbackDataOutput feedback_data: (optional) Information returned from the `POST - /v1/feedback` method. - """ - - def __init__(self, - feedback_id=None, - created=None, - comment=None, - feedback_data=None): - """ - Initialize a GetFeedback object. - - :param str feedback_id: (optional) A string uniquely identifying the feedback - entry. - :param datetime created: (optional) A timestamp identifying the creation time of - the feedback entry. - :param str comment: (optional) A string containing the user's comment about the - feedback entry. - :param FeedbackDataOutput feedback_data: (optional) Information returned from the - `POST /v1/feedback` method. 
- """ - self.feedback_id = feedback_id - self.created = created - self.comment = comment - self.feedback_data = feedback_data - - @classmethod - def _from_dict(cls, _dict): - """Initialize a GetFeedback object from a json dictionary.""" - args = {} - if 'feedback_id' in _dict: - args['feedback_id'] = _dict.get('feedback_id') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'comment' in _dict: - args['comment'] = _dict.get('comment') - if 'feedback_data' in _dict: - args['feedback_data'] = FeedbackDataOutput._from_dict( - _dict.get('feedback_data')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'feedback_id') and self.feedback_id is not None: - _dict['feedback_id'] = self.feedback_id - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'comment') and self.comment is not None: - _dict['comment'] = self.comment - if hasattr(self, 'feedback_data') and self.feedback_data is not None: - _dict['feedback_data'] = self.feedback_data._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this GetFeedback object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class HTMLReturn(object): - """ - The HTML converted from an input document. - - :attr str num_pages: (optional) The number of pages in the input document. - :attr str author: (optional) The author of the input document, if identified. - :attr str publication_date: (optional) The publication date of the input document, if - identified. - :attr str title: (optional) The title of the input document, if identified. - :attr str html: (optional) The HTML version of the input document. - """ - - def __init__(self, - num_pages=None, - author=None, - publication_date=None, - title=None, - html=None): - """ - Initialize a HTMLReturn object. - - :param str num_pages: (optional) The number of pages in the input document. - :param str author: (optional) The author of the input document, if identified. - :param str publication_date: (optional) The publication date of the input - document, if identified. - :param str title: (optional) The title of the input document, if identified. - :param str html: (optional) The HTML version of the input document. 
- """ - self.num_pages = num_pages - self.author = author - self.publication_date = publication_date - self.title = title - self.html = html - - @classmethod - def _from_dict(cls, _dict): - """Initialize a HTMLReturn object from a json dictionary.""" - args = {} - if 'num_pages' in _dict: - args['num_pages'] = _dict.get('num_pages') - if 'author' in _dict: - args['author'] = _dict.get('author') - if 'publication_date' in _dict: - args['publication_date'] = _dict.get('publication_date') - if 'title' in _dict: - args['title'] = _dict.get('title') - if 'html' in _dict: - args['html'] = _dict.get('html') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'num_pages') and self.num_pages is not None: - _dict['num_pages'] = self.num_pages - if hasattr(self, 'author') and self.author is not None: - _dict['author'] = self.author - if hasattr(self, - 'publication_date') and self.publication_date is not None: - _dict['publication_date'] = self.publication_date - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, 'html') and self.html is not None: - _dict['html'] = self.html - return _dict - - def __str__(self): - """Return a `str` version of this HTMLReturn object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Key(object): - """ - A key in a key-value pair. - - :attr str cell_id: (optional) The unique ID of the key in the table. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - :attr str text: (optional) The text content of the table cell without HTML markup. - """ - - def __init__(self, cell_id=None, location=None, text=None): - """ - Initialize a Key object. - - :param str cell_id: (optional) The unique ID of the key in the table. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - :param str text: (optional) The text content of the table cell without HTML - markup. 
- """ - self.cell_id = cell_id - self.location = location - self.text = text - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Key object from a json dictionary.""" - args = {} - if 'cell_id' in _dict: - args['cell_id'] = _dict.get('cell_id') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - if 'text' in _dict: - args['text'] = _dict.get('text') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'cell_id') and self.cell_id is not None: - _dict['cell_id'] = self.cell_id - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - return _dict - - def __str__(self): - """Return a `str` version of this Key object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class KeyValuePair(object): - """ - Key-value pairs detected across cell boundaries. - - :attr Key key: (optional) A key in a key-value pair. - :attr Value value: (optional) A value in a key-value pair. - """ - - def __init__(self, key=None, value=None): - """ - Initialize a KeyValuePair object. - - :param Key key: (optional) A key in a key-value pair. - :param Value value: (optional) A value in a key-value pair. - """ - self.key = key - self.value = value - - @classmethod - def _from_dict(cls, _dict): - """Initialize a KeyValuePair object from a json dictionary.""" - args = {} - if 'key' in _dict: - args['key'] = Key._from_dict(_dict.get('key')) - if 'value' in _dict: - args['value'] = Value._from_dict(_dict.get('value')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'key') and self.key is not None: - _dict['key'] = self.key._to_dict() - if hasattr(self, 'value') and self.value is not None: - _dict['value'] = self.value._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this KeyValuePair object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Label(object): - """ - A pair of `nature` and `party` objects. The `nature` object identifies the effect of - the element on the identified `party`, and the `party` object identifies the affected - party. - - :attr str nature: The identified `nature` of the element. - :attr str party: The identified `party` of the element. - """ - - def __init__(self, nature, party): - """ - Initialize a Label object. - - :param str nature: The identified `nature` of the element. - :param str party: The identified `party` of the element. 
- """ - self.nature = nature - self.party = party - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Label object from a json dictionary.""" - args = {} - if 'nature' in _dict: - args['nature'] = _dict.get('nature') - else: - raise ValueError( - 'Required property \'nature\' not present in Label JSON') - if 'party' in _dict: - args['party'] = _dict.get('party') - else: - raise ValueError( - 'Required property \'party\' not present in Label JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'nature') and self.nature is not None: - _dict['nature'] = self.nature - if hasattr(self, 'party') and self.party is not None: - _dict['party'] = self.party - return _dict - - def __str__(self): - """Return a `str` version of this Label object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class LeadingSentence(object): - """ - The leading sentences in a section or subsection of the input document. - - :attr str text: (optional) The text of the leading sentence. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - :attr list[ElementLocations] element_locations: (optional) An array of `location` - objects that lists the locations of detected leading sentences. - """ - - def __init__(self, text=None, location=None, element_locations=None): - """ - Initialize a LeadingSentence object. - - :param str text: (optional) The text of the leading sentence. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - :param list[ElementLocations] element_locations: (optional) An array of `location` - objects that lists the locations of detected leading sentences. 
- """ - self.text = text - self.location = location - self.element_locations = element_locations - - @classmethod - def _from_dict(cls, _dict): - """Initialize a LeadingSentence object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - if 'element_locations' in _dict: - args['element_locations'] = [ - ElementLocations._from_dict(x) - for x in (_dict.get('element_locations')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - if hasattr(self, - 'element_locations') and self.element_locations is not None: - _dict['element_locations'] = [ - x._to_dict() for x in self.element_locations - ] - return _dict - - def __str__(self): - """Return a `str` version of this LeadingSentence object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Location(object): - """ - The numeric location of the identified element in the document, represented with two - integers labeled `begin` and `end`. - - :attr int begin: The element's `begin` index. - :attr int end: The element's `end` index. - """ - - def __init__(self, begin, end): - """ - Initialize a Location object. - - :param int begin: The element's `begin` index. - :param int end: The element's `end` index. - """ - self.begin = begin - self.end = end - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Location object from a json dictionary.""" - args = {} - if 'begin' in _dict: - args['begin'] = _dict.get('begin') - else: - raise ValueError( - 'Required property \'begin\' not present in Location JSON') - if 'end' in _dict: - args['end'] = _dict.get('end') - else: - raise ValueError( - 'Required property \'end\' not present in Location JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'begin') and self.begin is not None: - _dict['begin'] = self.begin - if hasattr(self, 'end') and self.end is not None: - _dict['end'] = self.end - return _dict - - def __str__(self): - """Return a `str` version of this Location object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class OriginalLabelsIn(object): - """ - The original labeling from the input document, without the submitted feedback. - - :attr list[TypeLabel] types: Description of the action specified by the element and - whom it affects. - :attr list[Category] categories: List of functional categories into which the element - falls; in other words, the subject matter of the element. 
- """ - - def __init__(self, types, categories): - """ - Initialize a OriginalLabelsIn object. - - :param list[TypeLabel] types: Description of the action specified by the element - and whom it affects. - :param list[Category] categories: List of functional categories into which the - element falls; in other words, the subject matter of the element. - """ - self.types = types - self.categories = categories - - @classmethod - def _from_dict(cls, _dict): - """Initialize a OriginalLabelsIn object from a json dictionary.""" - args = {} - if 'types' in _dict: - args['types'] = [ - TypeLabel._from_dict(x) for x in (_dict.get('types')) - ] - else: - raise ValueError( - 'Required property \'types\' not present in OriginalLabelsIn JSON' - ) - if 'categories' in _dict: - args['categories'] = [ - Category._from_dict(x) for x in (_dict.get('categories')) - ] - else: - raise ValueError( - 'Required property \'categories\' not present in OriginalLabelsIn JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'types') and self.types is not None: - _dict['types'] = [x._to_dict() for x in self.types] - if hasattr(self, 'categories') and self.categories is not None: - _dict['categories'] = [x._to_dict() for x in self.categories] - return _dict - - def __str__(self): - """Return a `str` version of this OriginalLabelsIn object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class OriginalLabelsOut(object): - """ - The original labeling from the input document, without the submitted feedback. - - :attr list[TypeLabel] types: (optional) Description of the action specified by the - element and whom it affects. - :attr list[Category] categories: (optional) List of functional categories into which - the element falls; in other words, the subject matter of the element. - :attr str modification: (optional) A string identifying the type of modification the - feedback entry in the `updated_labels` array. Possible values are `added`, - `not_changed`, and `removed`. - """ - - def __init__(self, types=None, categories=None, modification=None): - """ - Initialize a OriginalLabelsOut object. - - :param list[TypeLabel] types: (optional) Description of the action specified by - the element and whom it affects. - :param list[Category] categories: (optional) List of functional categories into - which the element falls; in other words, the subject matter of the element. - :param str modification: (optional) A string identifying the type of modification - the feedback entry in the `updated_labels` array. Possible values are `added`, - `not_changed`, and `removed`. 
- """ - self.types = types - self.categories = categories - self.modification = modification - - @classmethod - def _from_dict(cls, _dict): - """Initialize a OriginalLabelsOut object from a json dictionary.""" - args = {} - if 'types' in _dict: - args['types'] = [ - TypeLabel._from_dict(x) for x in (_dict.get('types')) - ] - if 'categories' in _dict: - args['categories'] = [ - Category._from_dict(x) for x in (_dict.get('categories')) - ] - if 'modification' in _dict: - args['modification'] = _dict.get('modification') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'types') and self.types is not None: - _dict['types'] = [x._to_dict() for x in self.types] - if hasattr(self, 'categories') and self.categories is not None: - _dict['categories'] = [x._to_dict() for x in self.categories] - if hasattr(self, 'modification') and self.modification is not None: - _dict['modification'] = self.modification - return _dict - - def __str__(self): - """Return a `str` version of this OriginalLabelsOut object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Pagination(object): - """ - Pagination details, if required by the length of the output. - - :attr str refresh_cursor: (optional) A token identifying the current page of results. - :attr str next_cursor: (optional) A token identifying the next page of results. - :attr str refresh_url: (optional) The URL that returns the current page of results. - :attr str next_url: (optional) The URL that returns the next page of results. - :attr int total: (optional) Reserved for future use. - """ - - def __init__(self, - refresh_cursor=None, - next_cursor=None, - refresh_url=None, - next_url=None, - total=None): - """ - Initialize a Pagination object. - - :param str refresh_cursor: (optional) A token identifying the current page of - results. - :param str next_cursor: (optional) A token identifying the next page of results. - :param str refresh_url: (optional) The URL that returns the current page of - results. - :param str next_url: (optional) The URL that returns the next page of results. - :param int total: (optional) Reserved for future use. 
- """ - self.refresh_cursor = refresh_cursor - self.next_cursor = next_cursor - self.refresh_url = refresh_url - self.next_url = next_url - self.total = total - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Pagination object from a json dictionary.""" - args = {} - if 'refresh_cursor' in _dict: - args['refresh_cursor'] = _dict.get('refresh_cursor') - if 'next_cursor' in _dict: - args['next_cursor'] = _dict.get('next_cursor') - if 'refresh_url' in _dict: - args['refresh_url'] = _dict.get('refresh_url') - if 'next_url' in _dict: - args['next_url'] = _dict.get('next_url') - if 'total' in _dict: - args['total'] = _dict.get('total') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'refresh_cursor') and self.refresh_cursor is not None: - _dict['refresh_cursor'] = self.refresh_cursor - if hasattr(self, 'next_cursor') and self.next_cursor is not None: - _dict['next_cursor'] = self.next_cursor - if hasattr(self, 'refresh_url') and self.refresh_url is not None: - _dict['refresh_url'] = self.refresh_url - if hasattr(self, 'next_url') and self.next_url is not None: - _dict['next_url'] = self.next_url - if hasattr(self, 'total') and self.total is not None: - _dict['total'] = self.total - return _dict - - def __str__(self): - """Return a `str` version of this Pagination object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Parties(object): - """ - A party and its corresponding role, including address and contact information if - identified. - - :attr str party: (optional) A string identifying the party. - :attr str importance: (optional) A string that identifies the importance of the party. - :attr str role: (optional) A string identifying the party's role. - :attr list[Address] addresses: (optional) List of the party's address or addresses. - :attr list[Contact] contacts: (optional) List of the names and roles of contacts - identified in the input document. - """ - - def __init__(self, - party=None, - importance=None, - role=None, - addresses=None, - contacts=None): - """ - Initialize a Parties object. - - :param str party: (optional) A string identifying the party. - :param str importance: (optional) A string that identifies the importance of the - party. - :param str role: (optional) A string identifying the party's role. - :param list[Address] addresses: (optional) List of the party's address or - addresses. - :param list[Contact] contacts: (optional) List of the names and roles of contacts - identified in the input document. 
- """ - self.party = party - self.importance = importance - self.role = role - self.addresses = addresses - self.contacts = contacts - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Parties object from a json dictionary.""" - args = {} - if 'party' in _dict: - args['party'] = _dict.get('party') - if 'importance' in _dict: - args['importance'] = _dict.get('importance') - if 'role' in _dict: - args['role'] = _dict.get('role') - if 'addresses' in _dict: - args['addresses'] = [ - Address._from_dict(x) for x in (_dict.get('addresses')) - ] - if 'contacts' in _dict: - args['contacts'] = [ - Contact._from_dict(x) for x in (_dict.get('contacts')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'party') and self.party is not None: - _dict['party'] = self.party - if hasattr(self, 'importance') and self.importance is not None: - _dict['importance'] = self.importance - if hasattr(self, 'role') and self.role is not None: - _dict['role'] = self.role - if hasattr(self, 'addresses') and self.addresses is not None: - _dict['addresses'] = [x._to_dict() for x in self.addresses] - if hasattr(self, 'contacts') and self.contacts is not None: - _dict['contacts'] = [x._to_dict() for x in self.contacts] - return _dict - - def __str__(self): - """Return a `str` version of this Parties object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class RowHeaderIds(object): - """ - An array of values, each being the `id` value of a row header that is applicable to - this body cell. - - :attr str id: (optional) The `id` values of a row header. - """ - - def __init__(self, id=None): - """ - Initialize a RowHeaderIds object. - - :param str id: (optional) The `id` values of a row header. - """ - self.id = id - - @classmethod - def _from_dict(cls, _dict): - """Initialize a RowHeaderIds object from a json dictionary.""" - args = {} - if 'id' in _dict: - args['id'] = _dict.get('id') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'id') and self.id is not None: - _dict['id'] = self.id - return _dict - - def __str__(self): - """Return a `str` version of this RowHeaderIds object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class RowHeaderTexts(object): - """ - An array of values, each being the `text` value of a row header that is applicable to - this body cell. - - :attr str text: (optional) The `text` value of a row header. - """ - - def __init__(self, text=None): - """ - Initialize a RowHeaderTexts object. - - :param str text: (optional) The `text` value of a row header. 
- """ - self.text = text - - @classmethod - def _from_dict(cls, _dict): - """Initialize a RowHeaderTexts object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - return _dict - - def __str__(self): - """Return a `str` version of this RowHeaderTexts object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class RowHeaderTextsNormalized(object): - """ - If you provide customization input, the normalized version of the row header texts - according to the customization; otherwise, the same value as `row_header_texts`. - - :attr str text_normalized: (optional) The normalized version of a row header text. - """ - - def __init__(self, text_normalized=None): - """ - Initialize a RowHeaderTextsNormalized object. - - :param str text_normalized: (optional) The normalized version of a row header - text. - """ - self.text_normalized = text_normalized - - @classmethod - def _from_dict(cls, _dict): - """Initialize a RowHeaderTextsNormalized object from a json dictionary.""" - args = {} - if 'text_normalized' in _dict: - args['text_normalized'] = _dict.get('text_normalized') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'text_normalized') and self.text_normalized is not None: - _dict['text_normalized'] = self.text_normalized - return _dict - - def __str__(self): - """Return a `str` version of this RowHeaderTextsNormalized object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class RowHeaders(object): - """ - Row-level cells, each applicable as a header to other cells in the same row as itself, - of the current table. - - :attr str cell_id: (optional) The unique ID of the cell in the current table. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - :attr str text: (optional) The textual contents of this cell from the input document - without associated markup content. - :attr str text_normalized: (optional) If you provide customization input, the - normalized version of the cell text according to the customization; otherwise, the - same value as `text`. - :attr int row_index_begin: (optional) The `begin` index of this cell's `row` location - in the current table. - :attr int row_index_end: (optional) The `end` index of this cell's `row` location in - the current table. - :attr int column_index_begin: (optional) The `begin` index of this cell's `column` - location in the current table. - :attr int column_index_end: (optional) The `end` index of this cell's `column` - location in the current table. 
- """ - - def __init__(self, - cell_id=None, - location=None, - text=None, - text_normalized=None, - row_index_begin=None, - row_index_end=None, - column_index_begin=None, - column_index_end=None): - """ - Initialize a RowHeaders object. - - :param str cell_id: (optional) The unique ID of the cell in the current table. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - :param str text: (optional) The textual contents of this cell from the input - document without associated markup content. - :param str text_normalized: (optional) If you provide customization input, the - normalized version of the cell text according to the customization; otherwise, the - same value as `text`. - :param int row_index_begin: (optional) The `begin` index of this cell's `row` - location in the current table. - :param int row_index_end: (optional) The `end` index of this cell's `row` location - in the current table. - :param int column_index_begin: (optional) The `begin` index of this cell's - `column` location in the current table. - :param int column_index_end: (optional) The `end` index of this cell's `column` - location in the current table. - """ - self.cell_id = cell_id - self.location = location - self.text = text - self.text_normalized = text_normalized - self.row_index_begin = row_index_begin - self.row_index_end = row_index_end - self.column_index_begin = column_index_begin - self.column_index_end = column_index_end - - @classmethod - def _from_dict(cls, _dict): - """Initialize a RowHeaders object from a json dictionary.""" - args = {} - if 'cell_id' in _dict: - args['cell_id'] = _dict.get('cell_id') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'text_normalized' in _dict: - args['text_normalized'] = _dict.get('text_normalized') - if 'row_index_begin' in _dict: - args['row_index_begin'] = _dict.get('row_index_begin') - if 'row_index_end' in _dict: - args['row_index_end'] = _dict.get('row_index_end') - if 'column_index_begin' in _dict: - args['column_index_begin'] = _dict.get('column_index_begin') - if 'column_index_end' in _dict: - args['column_index_end'] = _dict.get('column_index_end') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'cell_id') and self.cell_id is not None: - _dict['cell_id'] = self.cell_id - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, - 'text_normalized') and self.text_normalized is not None: - _dict['text_normalized'] = self.text_normalized - if hasattr(self, - 'row_index_begin') and self.row_index_begin is not None: - _dict['row_index_begin'] = self.row_index_begin - if hasattr(self, 'row_index_end') and self.row_index_end is not None: - _dict['row_index_end'] = self.row_index_end - if hasattr( - self, - 'column_index_begin') and self.column_index_begin is not None: - _dict['column_index_begin'] = self.column_index_begin - if hasattr(self, - 'column_index_end') and self.column_index_end is not None: - _dict['column_index_end'] = self.column_index_end - return _dict - - def __str__(self): - """Return a `str` version of this RowHeaders object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - 
"""Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SectionTitle(object): - """ - The table's section title, if identified. - - :attr str text: (optional) The text of the section title, if identified. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - """ - - def __init__(self, text=None, location=None): - """ - Initialize a SectionTitle object. - - :param str text: (optional) The text of the section title, if identified. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - """ - self.text = text - self.location = location - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SectionTitle object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this SectionTitle object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SectionTitles(object): - """ - An array containing one object per section or subsection detected in the input - document. Sections and subsections are not nested; instead, they are flattened out and - can be placed back in order by using the `begin` and `end` values of the element and - the `level` value of the section. - - :attr str text: (optional) The text of the section title, if identified. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - :attr int level: (optional) An integer indicating the level at which the section is - located in the input document. For example, `1` represents a top-level section, `2` - represents a subsection within the level `1` section, and so forth. - :attr list[ElementLocations] element_locations: (optional) An array of `location` - objects that lists the locations of detected section titles. - """ - - def __init__(self, - text=None, - location=None, - level=None, - element_locations=None): - """ - Initialize a SectionTitles object. - - :param str text: (optional) The text of the section title, if identified. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - :param int level: (optional) An integer indicating the level at which the section - is located in the input document. 
For example, `1` represents a top-level section, - `2` represents a subsection within the level `1` section, and so forth. - :param list[ElementLocations] element_locations: (optional) An array of `location` - objects that lists the locations of detected section titles. - """ - self.text = text - self.location = location - self.level = level - self.element_locations = element_locations - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SectionTitles object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - if 'level' in _dict: - args['level'] = _dict.get('level') - if 'element_locations' in _dict: - args['element_locations'] = [ - ElementLocations._from_dict(x) - for x in (_dict.get('element_locations')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - if hasattr(self, 'level') and self.level is not None: - _dict['level'] = self.level - if hasattr(self, - 'element_locations') and self.element_locations is not None: - _dict['element_locations'] = [ - x._to_dict() for x in self.element_locations - ] - return _dict - - def __str__(self): - """Return a `str` version of this SectionTitles object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ShortDoc(object): - """ - Brief information about the input document. - - :attr str title: (optional) The title of the input document, if identified. - :attr str hash: (optional) The MD5 hash of the input document. - """ - - def __init__(self, title=None, hash=None): - """ - Initialize a ShortDoc object. - - :param str title: (optional) The title of the input document, if identified. - :param str hash: (optional) The MD5 hash of the input document. - """ - self.title = title - self.hash = hash - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ShortDoc object from a json dictionary.""" - args = {} - if 'title' in _dict: - args['title'] = _dict.get('title') - if 'hash' in _dict: - args['hash'] = _dict.get('hash') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, 'hash') and self.hash is not None: - _dict['hash'] = self.hash - return _dict - - def __str__(self): - """Return a `str` version of this ShortDoc object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TableHeaders(object): - """ - The contents of the current table's header. 
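# Illustrative sketch (not part of this diff): every model in this file follows the same
# contract, so a simple one such as ShortDoc (defined above) round-trips cleanly:
# _from_dict builds the object from a JSON dictionary, _to_dict reverses it, and
# __str__ pretty-prints the JSON. The values are placeholders.
sample = {'title': 'Master Services Agreement', 'hash': 'd41d8cd98f00b204e9800998ecf8427e'}
doc = ShortDoc._from_dict(sample)
assert doc._to_dict() == sample   # keys that were present come back unchanged
print(doc)                        # JSON representation via json.dumps(..., indent=2)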
- - :attr str cell_id: (optional) The unique ID of the cell in the current table. - :attr object location: (optional) The location of the table header cell in the current - table as defined by its `begin` and `end` offsets, respectively, in the input - document. - :attr str text: (optional) The textual contents of the cell from the input document - without associated markup content. - :attr int row_index_begin: (optional) The `begin` index of this cell's `row` location - in the current table. - :attr int row_index_end: (optional) The `end` index of this cell's `row` location in - the current table. - :attr int column_index_begin: (optional) The `begin` index of this cell's `column` - location in the current table. - :attr int column_index_end: (optional) The `end` index of this cell's `column` - location in the current table. - """ - - def __init__(self, - cell_id=None, - location=None, - text=None, - row_index_begin=None, - row_index_end=None, - column_index_begin=None, - column_index_end=None): - """ - Initialize a TableHeaders object. - - :param str cell_id: (optional) The unique ID of the cell in the current table. - :param object location: (optional) The location of the table header cell in the - current table as defined by its `begin` and `end` offsets, respectively, in the - input document. - :param str text: (optional) The textual contents of the cell from the input - document without associated markup content. - :param int row_index_begin: (optional) The `begin` index of this cell's `row` - location in the current table. - :param int row_index_end: (optional) The `end` index of this cell's `row` location - in the current table. - :param int column_index_begin: (optional) The `begin` index of this cell's - `column` location in the current table. - :param int column_index_end: (optional) The `end` index of this cell's `column` - location in the current table.
- """ - self.cell_id = cell_id - self.location = location - self.text = text - self.row_index_begin = row_index_begin - self.row_index_end = row_index_end - self.column_index_begin = column_index_begin - self.column_index_end = column_index_end - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TableHeaders object from a json dictionary.""" - args = {} - if 'cell_id' in _dict: - args['cell_id'] = _dict.get('cell_id') - if 'location' in _dict: - args['location'] = _dict.get('location') - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'row_index_begin' in _dict: - args['row_index_begin'] = _dict.get('row_index_begin') - if 'row_index_end' in _dict: - args['row_index_end'] = _dict.get('row_index_end') - if 'column_index_begin' in _dict: - args['column_index_begin'] = _dict.get('column_index_begin') - if 'column_index_end' in _dict: - args['column_index_end'] = _dict.get('column_index_end') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'cell_id') and self.cell_id is not None: - _dict['cell_id'] = self.cell_id - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, - 'row_index_begin') and self.row_index_begin is not None: - _dict['row_index_begin'] = self.row_index_begin - if hasattr(self, 'row_index_end') and self.row_index_end is not None: - _dict['row_index_end'] = self.row_index_end - if hasattr( - self, - 'column_index_begin') and self.column_index_begin is not None: - _dict['column_index_begin'] = self.column_index_begin - if hasattr(self, - 'column_index_end') and self.column_index_end is not None: - _dict['column_index_end'] = self.column_index_end - return _dict - - def __str__(self): - """Return a `str` version of this TableHeaders object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TableReturn(object): - """ - The analysis of the document's tables. - - :attr DocInfo document: (optional) Information about the parsed input document. - :attr str model_id: (optional) The ID of the model used to extract the table contents. - The value for table extraction is `tables`. - :attr str model_version: (optional) The version of the `tables` model ID. - :attr list[Tables] tables: (optional) Definitions of the tables identified in the - input document. - """ - - def __init__(self, - document=None, - model_id=None, - model_version=None, - tables=None): - """ - Initialize a TableReturn object. - - :param DocInfo document: (optional) Information about the parsed input document. - :param str model_id: (optional) The ID of the model used to extract the table - contents. The value for table extraction is `tables`. - :param str model_version: (optional) The version of the `tables` model ID. - :param list[Tables] tables: (optional) Definitions of the tables identified in the - input document. 
- """ - self.document = document - self.model_id = model_id - self.model_version = model_version - self.tables = tables - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TableReturn object from a json dictionary.""" - args = {} - if 'document' in _dict: - args['document'] = DocInfo._from_dict(_dict.get('document')) - if 'model_id' in _dict: - args['model_id'] = _dict.get('model_id') - if 'model_version' in _dict: - args['model_version'] = _dict.get('model_version') - if 'tables' in _dict: - args['tables'] = [ - Tables._from_dict(x) for x in (_dict.get('tables')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document') and self.document is not None: - _dict['document'] = self.document._to_dict() - if hasattr(self, 'model_id') and self.model_id is not None: - _dict['model_id'] = self.model_id - if hasattr(self, 'model_version') and self.model_version is not None: - _dict['model_version'] = self.model_version - if hasattr(self, 'tables') and self.tables is not None: - _dict['tables'] = [x._to_dict() for x in self.tables] - return _dict - - def __str__(self): - """Return a `str` version of this TableReturn object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Tables(object): - """ - The contents of the tables extracted from a document. - - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - :attr str text: (optional) The textual contents of the current table from the input - document without associated markup content. - :attr SectionTitle section_title: (optional) The table's section title, if identified. - :attr list[TableHeaders] table_headers: (optional) An array of table-level cells that - apply as headers to all the other cells in the current table. - :attr list[RowHeaders] row_headers: (optional) An array of row-level cells, each - applicable as a header to other cells in the same row as itself, of the current table. - :attr list[ColumnHeaders] column_headers: (optional) An array of column-level cells, - each applicable as a header to other cells in the same column as itself, of the - current table. - :attr list[KeyValuePair] key_value_pairs: (optional) An array of key-value pairs - identified in the current table. - :attr list[BodyCells] body_cells: (optional) An array of cells that are neither table - header nor column header nor row header cells, of the current table with corresponding - row and column header associations. - """ - - def __init__(self, - location=None, - text=None, - section_title=None, - table_headers=None, - row_headers=None, - column_headers=None, - key_value_pairs=None, - body_cells=None): - """ - Initialize a Tables object. - - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - :param str text: (optional) The textual contents of the current table from the - input document without associated markup content. - :param SectionTitle section_title: (optional) The table's section title, if - identified. 
- :param list[TableHeaders] table_headers: (optional) An array of table-level cells - that apply as headers to all the other cells in the current table. - :param list[RowHeaders] row_headers: (optional) An array of row-level cells, each - applicable as a header to other cells in the same row as itself, of the current - table. - :param list[ColumnHeaders] column_headers: (optional) An array of column-level - cells, each applicable as a header to other cells in the same column as itself, of - the current table. - :param list[KeyValuePair] key_value_pairs: (optional) An array of key-value pairs - identified in the current table. - :param list[BodyCells] body_cells: (optional) An array of cells that are neither - table header nor column header nor row header cells, of the current table with - corresponding row and column header associations. - """ - self.location = location - self.text = text - self.section_title = section_title - self.table_headers = table_headers - self.row_headers = row_headers - self.column_headers = column_headers - self.key_value_pairs = key_value_pairs - self.body_cells = body_cells - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Tables object from a json dictionary.""" - args = {} - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'section_title' in _dict: - args['section_title'] = SectionTitle._from_dict( - _dict.get('section_title')) - if 'table_headers' in _dict: - args['table_headers'] = [ - TableHeaders._from_dict(x) for x in (_dict.get('table_headers')) - ] - if 'row_headers' in _dict: - args['row_headers'] = [ - RowHeaders._from_dict(x) for x in (_dict.get('row_headers')) - ] - if 'column_headers' in _dict: - args['column_headers'] = [ - ColumnHeaders._from_dict(x) - for x in (_dict.get('column_headers')) - ] - if 'key_value_pairs' in _dict: - args['key_value_pairs'] = [ - KeyValuePair._from_dict(x) - for x in (_dict.get('key_value_pairs')) - ] - if 'body_cells' in _dict: - args['body_cells'] = [ - BodyCells._from_dict(x) for x in (_dict.get('body_cells')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'section_title') and self.section_title is not None: - _dict['section_title'] = self.section_title._to_dict() - if hasattr(self, 'table_headers') and self.table_headers is not None: - _dict['table_headers'] = [x._to_dict() for x in self.table_headers] - if hasattr(self, 'row_headers') and self.row_headers is not None: - _dict['row_headers'] = [x._to_dict() for x in self.row_headers] - if hasattr(self, 'column_headers') and self.column_headers is not None: - _dict['column_headers'] = [ - x._to_dict() for x in self.column_headers - ] - if hasattr(self, - 'key_value_pairs') and self.key_value_pairs is not None: - _dict['key_value_pairs'] = [ - x._to_dict() for x in self.key_value_pairs - ] - if hasattr(self, 'body_cells') and self.body_cells is not None: - _dict['body_cells'] = [x._to_dict() for x in self.body_cells] - return _dict - - def __str__(self): - """Return a `str` version of this Tables object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not 
isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TerminationDates(object): - """ - Termination dates identified in the input document. - - :attr str text: (optional) The termination date. - :attr str confidence_level: (optional) The confidence level in the identification of - the termination date. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - """ - - def __init__(self, text=None, confidence_level=None, location=None): - """ - Initialize a TerminationDates object. - - :param str text: (optional) The termination date. - :param str confidence_level: (optional) The confidence level in the identification - of the termination date. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - """ - self.text = text - self.confidence_level = confidence_level - self.location = location - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TerminationDates object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'confidence_level' in _dict: - args['confidence_level'] = _dict.get('confidence_level') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, - 'confidence_level') and self.confidence_level is not None: - _dict['confidence_level'] = self.confidence_level - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this TerminationDates object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TypeLabel(object): - """ - Identification of a specific type. - - :attr Label label: (optional) A pair of `nature` and `party` objects. The `nature` - object identifies the effect of the element on the identified `party`, and the `party` - object identifies the affected party. - :attr list[str] provenance_ids: (optional) One or more hash values that you can send - to IBM to provide feedback or receive support. - """ - - def __init__(self, label=None, provenance_ids=None): - """ - Initialize a TypeLabel object. - - :param Label label: (optional) A pair of `nature` and `party` objects. The - `nature` object identifies the effect of the element on the identified `party`, - and the `party` object identifies the affected party. - :param list[str] provenance_ids: (optional) One or more hash values that you can - send to IBM to provide feedback or receive support. 
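# Illustrative sketch (not part of this diff): TableReturn._from_dict (above) wires up the
# nested Tables, SectionTitle and cell models, so a table-extraction response body can be
# traversed as typed objects. The payload is a minimal placeholder; the row/column index
# fields on body cells are assumed to mirror the header-cell models shown above.
response_json = {
    'model_id': 'tables',
    'tables': [{'text': 'Total revenue by quarter', 'body_cells': []}],
}
result = TableReturn._from_dict(response_json)
for table in result.tables or []:
    print(table.text)                        # raw text of the table
    if table.section_title is not None:
        print(table.section_title.text)      # section title, when identified
    for cell in table.body_cells or []:
        print(cell.row_index_begin, cell.column_index_begin, cell.text)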
- """ - self.label = label - self.provenance_ids = provenance_ids - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TypeLabel object from a json dictionary.""" - args = {} - if 'label' in _dict: - args['label'] = Label._from_dict(_dict.get('label')) - if 'provenance_ids' in _dict: - args['provenance_ids'] = _dict.get('provenance_ids') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'label') and self.label is not None: - _dict['label'] = self.label._to_dict() - if hasattr(self, 'provenance_ids') and self.provenance_ids is not None: - _dict['provenance_ids'] = self.provenance_ids - return _dict - - def __str__(self): - """Return a `str` version of this TypeLabel object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TypeLabelComparison(object): - """ - Identification of a specific type. - - :attr Label label: (optional) A pair of `nature` and `party` objects. The `nature` - object identifies the effect of the element on the identified `party`, and the `party` - object identifies the affected party. - """ - - def __init__(self, label=None): - """ - Initialize a TypeLabelComparison object. - - :param Label label: (optional) A pair of `nature` and `party` objects. The - `nature` object identifies the effect of the element on the identified `party`, - and the `party` object identifies the affected party. - """ - self.label = label - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TypeLabelComparison object from a json dictionary.""" - args = {} - if 'label' in _dict: - args['label'] = Label._from_dict(_dict.get('label')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'label') and self.label is not None: - _dict['label'] = self.label._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this TypeLabelComparison object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class UnalignedElement(object): - """ - Element that does not align semantically between two compared documents. - - :attr str document_label: (optional) The label assigned to the document by the value - of the `file_1_label` or `file_2_label` parameters on the **Compare two documents** - method. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - :attr str text: (optional) The text of the element. - :attr list[TypeLabelComparison] types: (optional) Description of the action specified - by the element and whom it affects. - :attr list[CategoryComparison] categories: (optional) List of functional categories - into which the element falls; in other words, the subject matter of the element. 
- :attr list[Attribute] attributes: (optional) List of document attributes. - """ - - def __init__(self, - document_label=None, - location=None, - text=None, - types=None, - categories=None, - attributes=None): - """ - Initialize a UnalignedElement object. - - :param str document_label: (optional) The label assigned to the document by the - value of the `file_1_label` or `file_2_label` parameters on the **Compare two - documents** method. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - :param str text: (optional) The text of the element. - :param list[TypeLabelComparison] types: (optional) Description of the action - specified by the element and whom it affects. - :param list[CategoryComparison] categories: (optional) List of functional - categories into which the element falls; in other words, the subject matter of the - element. - :param list[Attribute] attributes: (optional) List of document attributes. - """ - self.document_label = document_label - self.location = location - self.text = text - self.types = types - self.categories = categories - self.attributes = attributes - - @classmethod - def _from_dict(cls, _dict): - """Initialize a UnalignedElement object from a json dictionary.""" - args = {} - if 'document_label' in _dict: - args['document_label'] = _dict.get('document_label') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'types' in _dict: - args['types'] = [ - TypeLabelComparison._from_dict(x) for x in (_dict.get('types')) - ] - if 'categories' in _dict: - args['categories'] = [ - CategoryComparison._from_dict(x) - for x in (_dict.get('categories')) - ] - if 'attributes' in _dict: - args['attributes'] = [ - Attribute._from_dict(x) for x in (_dict.get('attributes')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document_label') and self.document_label is not None: - _dict['document_label'] = self.document_label - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'types') and self.types is not None: - _dict['types'] = [x._to_dict() for x in self.types] - if hasattr(self, 'categories') and self.categories is not None: - _dict['categories'] = [x._to_dict() for x in self.categories] - if hasattr(self, 'attributes') and self.attributes is not None: - _dict['attributes'] = [x._to_dict() for x in self.attributes] - return _dict - - def __str__(self): - """Return a `str` version of this UnalignedElement object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class UpdatedLabelsIn(object): - """ - The updated labeling from the input document, accounting for the submitted feedback. - - :attr list[TypeLabel] types: Description of the action specified by the element and - whom it affects. 
- :attr list[Category] categories: List of functional categories into which the element - falls; in other words, the subject matter of the element. - """ - - def __init__(self, types, categories): - """ - Initialize a UpdatedLabelsIn object. - - :param list[TypeLabel] types: Description of the action specified by the element - and whom it affects. - :param list[Category] categories: List of functional categories into which the - element falls; in other words, the subject matter of the element. - """ - self.types = types - self.categories = categories - - @classmethod - def _from_dict(cls, _dict): - """Initialize a UpdatedLabelsIn object from a json dictionary.""" - args = {} - if 'types' in _dict: - args['types'] = [ - TypeLabel._from_dict(x) for x in (_dict.get('types')) - ] - else: - raise ValueError( - 'Required property \'types\' not present in UpdatedLabelsIn JSON' - ) - if 'categories' in _dict: - args['categories'] = [ - Category._from_dict(x) for x in (_dict.get('categories')) - ] - else: - raise ValueError( - 'Required property \'categories\' not present in UpdatedLabelsIn JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'types') and self.types is not None: - _dict['types'] = [x._to_dict() for x in self.types] - if hasattr(self, 'categories') and self.categories is not None: - _dict['categories'] = [x._to_dict() for x in self.categories] - return _dict - - def __str__(self): - """Return a `str` version of this UpdatedLabelsIn object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class UpdatedLabelsOut(object): - """ - The updated labeling from the input document, accounting for the submitted feedback. - - :attr list[TypeLabel] types: (optional) Description of the action specified by the - element and whom it affects. - :attr list[Category] categories: (optional) List of functional categories into which - the element falls; in other words, the subject matter of the element. - :attr str modification: (optional) The type of modification the feedback entry in the - `updated_labels` array. Possible values are `added`, `not_changed`, and `removed`. - """ - - def __init__(self, types=None, categories=None, modification=None): - """ - Initialize a UpdatedLabelsOut object. - - :param list[TypeLabel] types: (optional) Description of the action specified by - the element and whom it affects. - :param list[Category] categories: (optional) List of functional categories into - which the element falls; in other words, the subject matter of the element. - :param str modification: (optional) The type of modification the feedback entry in - the `updated_labels` array. Possible values are `added`, `not_changed`, and - `removed`. 
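# Illustrative sketch (not part of this diff): unlike most models in this file,
# UpdatedLabelsIn._from_dict (above) enforces its required 'types' and 'categories'
# properties and raises ValueError when either key is missing from the JSON dictionary.
try:
    UpdatedLabelsIn._from_dict({'types': []})   # no 'categories' key supplied
except ValueError as err:
    print(err)   # Required property 'categories' not present in UpdatedLabelsIn JSON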
- """ - self.types = types - self.categories = categories - self.modification = modification - - @classmethod - def _from_dict(cls, _dict): - """Initialize a UpdatedLabelsOut object from a json dictionary.""" - args = {} - if 'types' in _dict: - args['types'] = [ - TypeLabel._from_dict(x) for x in (_dict.get('types')) - ] - if 'categories' in _dict: - args['categories'] = [ - Category._from_dict(x) for x in (_dict.get('categories')) - ] - if 'modification' in _dict: - args['modification'] = _dict.get('modification') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'types') and self.types is not None: - _dict['types'] = [x._to_dict() for x in self.types] - if hasattr(self, 'categories') and self.categories is not None: - _dict['categories'] = [x._to_dict() for x in self.categories] - if hasattr(self, 'modification') and self.modification is not None: - _dict['modification'] = self.modification - return _dict - - def __str__(self): - """Return a `str` version of this UpdatedLabelsOut object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Value(object): - """ - A value in a key-value pair. - - :attr str cell_id: (optional) The unique ID of the value in the table. - :attr Location location: (optional) The numeric location of the identified element in - the document, represented with two integers labeled `begin` and `end`. - :attr str text: (optional) The text content of the table cell without HTML markup. - """ - - def __init__(self, cell_id=None, location=None, text=None): - """ - Initialize a Value object. - - :param str cell_id: (optional) The unique ID of the value in the table. - :param Location location: (optional) The numeric location of the identified - element in the document, represented with two integers labeled `begin` and `end`. - :param str text: (optional) The text content of the table cell without HTML - markup. 
- """ - self.cell_id = cell_id - self.location = location - self.text = text - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Value object from a json dictionary.""" - args = {} - if 'cell_id' in _dict: - args['cell_id'] = _dict.get('cell_id') - if 'location' in _dict: - args['location'] = Location._from_dict(_dict.get('location')) - if 'text' in _dict: - args['text'] = _dict.get('text') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'cell_id') and self.cell_id is not None: - _dict['cell_id'] = self.cell_id - if hasattr(self, 'location') and self.location is not None: - _dict['location'] = self.location._to_dict() - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - return _dict - - def __str__(self): - """Return a `str` version of this Value object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other diff --git a/ibm_watson/discovery_v1.py b/ibm_watson/discovery_v1.py deleted file mode 100644 index 97b8625b3..000000000 --- a/ibm_watson/discovery_v1.py +++ /dev/null @@ -1,12165 +0,0 @@ -# coding: utf-8 - -# Copyright 2018 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -The IBM Watson™ Discovery Service is a cognitive search and content analytics engine -that you can add to applications to identify patterns, trends and actionable insights to -drive better decision-making. Securely unify structured and unstructured data with -pre-enriched content, and use a simplified query language to eliminate the need for manual -filtering of results. -""" - -from __future__ import absolute_import - -import json -from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService -from ibm_cloud_sdk_core import datetime_to_string, string_to_datetime -from os.path import basename - -############################################################################## -# Service -############################################################################## - - -class DiscoveryV1(BaseService): - """The Discovery V1 service.""" - - default_url = 'https://gateway.watsonplatform.net/discovery/api' - - def __init__( - self, - version, - url=default_url, - username=None, - password=None, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): - """ - Construct a new client for the Discovery service. - - :param str version: The API version date to use with the service, in - "YYYY-MM-DD" format. Whenever the API is changed in a backwards - incompatible way, a new minor version of the API is released. - The service uses the API version for the date you specify, or - the most recent version before that date. 
Note that you should - not programmatically specify the current date at runtime, in - case the API has been updated since your application's release. - Instead, specify a version date that is compatible with your - application, and don't change it until your application is - ready for a later version. - - :param str url: The base url to use when contacting the service (e.g. - "https://gateway.watsonplatform.net/discovery/api/discovery/api"). - The base url may differ between IBM Cloud regions. - - :param str username: The username used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str password: The password used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. - - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. Defaults to - 'https://iam.cloud.ibm.com/identity/token'. - """ - - BaseService.__init__( - self, - vcap_services_name='discovery', - url=url, - username=username, - password=password, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Discovery') - self.version = version - - ######################### - # Environments - ######################### - - def create_environment(self, name, description=None, size=None, **kwargs): - """ - Create an environment. - - Creates a new environment for private data. An environment must be created before - collections can be created. - **Note**: You can create only one environment for private data per service - instance. An attempt to create another environment results in an error. - - :param str name: Name that identifies the environment. - :param str description: Description of the environment. - :param str size: Size of the environment. In the Lite plan the default and only - accepted value is `LT`, in all other plans the default is `S`. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if name is None: - raise ValueError('name must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'create_environment') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = {'name': name, 'description': description, 'size': size} - - url = '/v1/environments' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def delete_environment(self, environment_id, **kwargs): - """ - Delete environment. 
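# Illustrative sketch (not part of this diff): how the DiscoveryV1 client being removed
# here was constructed and used, based on the constructor and create_environment
# signatures above. The API key, version date and names are placeholders; get_result()
# is assumed from the DetailedResponse type of the ibm_cloud_sdk_core release this SDK
# depended on.
from ibm_watson.discovery_v1 import DiscoveryV1

discovery = DiscoveryV1(
    version='2019-04-30',
    iam_apikey='YOUR_IAM_APIKEY',
    url='https://gateway.watsonplatform.net/discovery/api')

environment = discovery.create_environment(
    name='my-environment',
    description='Private data environment').get_result()
print(environment)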
- - :param str environment_id: The ID of the environment. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'delete_environment') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_environment(self, environment_id, **kwargs): - """ - Get environment info. - - :param str environment_id: The ID of the environment. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'get_environment') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_environments(self, name=None, **kwargs): - """ - List environments. - - List existing environments for the service instance. - - :param str name: Show only the environment with the given name. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'list_environments') - headers.update(sdk_headers) - - params = {'version': self.version, 'name': name} - - url = '/v1/environments' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_fields(self, environment_id, collection_ids, **kwargs): - """ - List fields across collections. - - Gets a list of the unique fields (and their types) stored in the indexes of the - specified collections. - - :param str environment_id: The ID of the environment. - :param list[str] collection_ids: A comma-separated list of collection IDs to be - queried against. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_ids is None: - raise ValueError('collection_ids must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'list_fields') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'collection_ids': self._convert_list(collection_ids) - } - - url = '/v1/environments/{0}/fields'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def update_environment(self, - environment_id, - name=None, - description=None, - size=None, - **kwargs): - """ - Update an environment. - - Updates an environment. The environment's **name** and **description** parameters - can be changed. You must specify a **name** for the environment. - - :param str environment_id: The ID of the environment. - :param str name: Name that identifies the environment. - :param str description: Description of the environment. - :param str size: Size that the environment should be increased to. Environment - size cannot be modified when using a Lite plan. Environment size can only - increased and not decreased. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'update_environment') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = {'name': name, 'description': description, 'size': size} - - url = '/v1/environments/{0}'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='PUT', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - ######################### - # Configurations - ######################### - - def create_configuration(self, - environment_id, - name, - description=None, - conversions=None, - enrichments=None, - normalizations=None, - source=None, - **kwargs): - """ - Add configuration. - - Creates a new configuration. - If the input configuration contains the **configuration_id**, **created**, or - **updated** properties, then they are ignored and overridden by the system, and an - error is not returned so that the overridden fields do not need to be removed when - copying a configuration. - The configuration can contain unrecognized JSON fields. Any such fields are - ignored and do not generate an error. This makes it easier to use newer - configuration files with older versions of the API and the service. It also makes - it possible for the tooling to add additional metadata and information to the - configuration. - - :param str environment_id: The ID of the environment. - :param str name: The name of the configuration. - :param str description: The description of the configuration, if available. - :param Conversions conversions: Document conversion settings. - :param list[Enrichment] enrichments: An array of document enrichment settings for - the configuration. 
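# Illustrative sketch (not part of this diff): the methods above accept extra request
# headers through **kwargs and convert list parameters such as collection_ids to the
# comma-separated form the API expects (via self._convert_list). `discovery` is the
# DiscoveryV1 client from the earlier sketch; the IDs and header are placeholders.
fields = discovery.list_fields(
    environment_id='YOUR_ENVIRONMENT_ID',
    collection_ids=['COLLECTION_ID_1', 'COLLECTION_ID_2'],
    headers={'Custom-Header': 'custom-value'}).get_result()
print(fields)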
- :param list[NormalizationOperation] normalizations: Defines operations that can be - used to transform the final output JSON into a normalized form. Operations are - executed in the order that they appear in the array. - :param Source source: Object containing source parameters for the configuration. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if name is None: - raise ValueError('name must be provided') - if conversions is not None: - conversions = self._convert_model(conversions, Conversions) - if enrichments is not None: - enrichments = [ - self._convert_model(x, Enrichment) for x in enrichments - ] - if normalizations is not None: - normalizations = [ - self._convert_model(x, NormalizationOperation) - for x in normalizations - ] - if source is not None: - source = self._convert_model(source, Source) - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'create_configuration') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'name': name, - 'description': description, - 'conversions': conversions, - 'enrichments': enrichments, - 'normalizations': normalizations, - 'source': source - } - - url = '/v1/environments/{0}/configurations'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def delete_configuration(self, environment_id, configuration_id, **kwargs): - """ - Delete a configuration. - - The deletion is performed unconditionally. A configuration deletion request - succeeds even if the configuration is referenced by a collection or document - ingestion. However, documents that have already been submitted for processing - continue to use the deleted configuration. Documents are always processed with a - snapshot of the configuration as it existed at the time the document was - submitted. - - :param str environment_id: The ID of the environment. - :param str configuration_id: The ID of the configuration. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if configuration_id is None: - raise ValueError('configuration_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'delete_configuration') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/configurations/{1}'.format( - *self._encode_path_vars(environment_id, configuration_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_configuration(self, environment_id, configuration_id, **kwargs): - """ - Get configuration details. - - :param str environment_id: The ID of the environment. - :param str configuration_id: The ID of the configuration. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if configuration_id is None: - raise ValueError('configuration_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'get_configuration') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/configurations/{1}'.format( - *self._encode_path_vars(environment_id, configuration_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_configurations(self, environment_id, name=None, **kwargs): - """ - List configurations. - - Lists existing configurations for the service instance. - - :param str environment_id: The ID of the environment. - :param str name: Find configurations with the given name. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'list_configurations') - headers.update(sdk_headers) - - params = {'version': self.version, 'name': name} - - url = '/v1/environments/{0}/configurations'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def update_configuration(self, - environment_id, - configuration_id, - name, - description=None, - conversions=None, - enrichments=None, - normalizations=None, - source=None, - **kwargs): - """ - Update a configuration. - - Replaces an existing configuration. - * Completely replaces the original configuration. - * The **configuration_id**, **updated**, and **created** fields are accepted in - the request, but they are ignored, and an error is not generated. It is also - acceptable for users to submit an updated configuration with none of the three - properties. - * Documents are processed with a snapshot of the configuration as it was at the - time the document was submitted to be ingested. This means that already submitted - documents will not see any updates made to the configuration. - - :param str environment_id: The ID of the environment. - :param str configuration_id: The ID of the configuration. - :param str name: The name of the configuration. - :param str description: The description of the configuration, if available. - :param Conversions conversions: Document conversion settings. - :param list[Enrichment] enrichments: An array of document enrichment settings for - the configuration. - :param list[NormalizationOperation] normalizations: Defines operations that can be - used to transform the final output JSON into a normalized form. Operations are - executed in the order that they appear in the array. - :param Source source: Object containing source parameters for the configuration. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
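# A sketch of the configuration lifecycle shown above (create, get, list,
# delete). It reuses the `discovery` client from the earlier sketch; the name,
# description and environment ID are placeholders, and get_result() is assumed
# to return the parsed JSON body of the DetailedResponse.
created = discovery.create_configuration(
    environment_id='my-environment-id',
    name='html-extraction',
    description='Conversion settings for HTML sources')
config_id = created.get_result()['configuration_id']

details = discovery.get_configuration('my-environment-id', config_id)
matching = discovery.list_configurations('my-environment-id', name='html-extraction')
discovery.delete_configuration('my-environment-id', config_id)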
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if configuration_id is None: - raise ValueError('configuration_id must be provided') - if name is None: - raise ValueError('name must be provided') - if conversions is not None: - conversions = self._convert_model(conversions, Conversions) - if enrichments is not None: - enrichments = [ - self._convert_model(x, Enrichment) for x in enrichments - ] - if normalizations is not None: - normalizations = [ - self._convert_model(x, NormalizationOperation) - for x in normalizations - ] - if source is not None: - source = self._convert_model(source, Source) - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'update_configuration') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'name': name, - 'description': description, - 'conversions': conversions, - 'enrichments': enrichments, - 'normalizations': normalizations, - 'source': source - } - - url = '/v1/environments/{0}/configurations/{1}'.format( - *self._encode_path_vars(environment_id, configuration_id)) - response = self.request( - method='PUT', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - ######################### - # Test your configuration on a document - ######################### - - def test_configuration_in_environment(self, - environment_id, - configuration=None, - file=None, - filename=None, - file_content_type=None, - metadata=None, - step=None, - configuration_id=None, - **kwargs): - """ - Test configuration. - - Runs a sample document through the default or your configuration and returns - diagnostic information designed to help you understand how the document was - processed. The document is not added to the index. - - :param str environment_id: The ID of the environment. - :param str configuration: The configuration to use to process the document. If - this part is provided, then the provided configuration is used to process the - document. If the **configuration_id** is also provided (both are present at the - same time), then request is rejected. The maximum supported configuration size is - 1 MB. Configuration parts larger than 1 MB are rejected. - See the `GET /configurations/{configuration_id}` operation for an example - configuration. - :param file file: The content of the document to ingest. The maximum supported - file size when adding a file to a collection is 50 megabytes, the maximum - supported file size when testing a confiruration is 1 megabyte. Files larger than - the supported size are rejected. - :param str filename: The filename for file. - :param str file_content_type: The content type of file. - :param str metadata: If you're using the Data Crawler to upload your documents, - you can test a document against the type of metadata that the Data Crawler might - send. The maximum supported metadata file size is 1 MB. Metadata parts larger than - 1 MB are rejected. - Example: ``` { - \"Creator\": \"Johnny Appleseed\", - \"Subject\": \"Apples\" - } ```. - :param str step: Specify to only run the input document through the given step - instead of running the input document through the entire ingestion workflow. Valid - values are `convert`, `enrich`, and `normalize`. - :param str configuration_id: The ID of the configuration to use to process the - document. 
If the **configuration** form part is also provided (both are present at - the same time), then the request will be rejected. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'test_configuration_in_environment') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'step': step, - 'configuration_id': configuration_id - } - - form_data = {} - if configuration: - form_data['configuration'] = (None, configuration, 'text/plain') - if file: - if not filename and hasattr(file, 'name'): - filename = basename(file.name) - if not filename: - raise ValueError('filename must be provided') - form_data['file'] = (filename, file, file_content_type or - 'application/octet-stream') - if metadata: - form_data['metadata'] = (None, metadata, 'text/plain') - - url = '/v1/environments/{0}/preview'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - ######################### - # Collections - ######################### - - def create_collection(self, - environment_id, - name, - description=None, - configuration_id=None, - language=None, - **kwargs): - """ - Create a collection. - - :param str environment_id: The ID of the environment. - :param str name: The name of the collection to be created. - :param str description: A description of the collection. - :param str configuration_id: The ID of the configuration in which the collection - is to be created. - :param str language: The language of the documents stored in the collection, in - the form of an ISO 639-1 language code. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if name is None: - raise ValueError('name must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'create_collection') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'name': name, - 'description': description, - 'configuration_id': configuration_id, - 'language': language - } - - url = '/v1/environments/{0}/collections'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def delete_collection(self, environment_id, collection_id, **kwargs): - """ - Delete a collection. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
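# A sketch of previewing a document against a configuration and then creating
# a collection, using the `discovery` client from the earlier sketch. The file
# path, content type, configuration ID and language are illustrative only.
with open('sample.html', 'rb') as sample:
    preview = discovery.test_configuration_in_environment(
        environment_id='my-environment-id',
        configuration_id='my-configuration-id',
        file=sample,
        filename='sample.html',
        file_content_type='text/html')

collection = discovery.create_collection(
    environment_id='my-environment-id',
    name='support-articles',
    description='HTML articles from the support site',
    configuration_id='my-configuration-id',
    language='en')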
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'delete_collection') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_collection(self, environment_id, collection_id, **kwargs): - """ - Get collection details. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'get_collection') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_collection_fields(self, environment_id, collection_id, **kwargs): - """ - List collection fields. - - Gets a list of the unique fields (and their types) stored in the index. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'list_collection_fields') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/fields'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_collections(self, environment_id, name=None, **kwargs): - """ - List collections. - - Lists existing collections for the service instance. - - :param str environment_id: The ID of the environment. - :param str name: Find collections with the given name. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
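# Minimal lookups for the collection methods above, reusing the `discovery`
# client from the earlier sketch; both IDs are placeholders.
info = discovery.get_collection('my-environment-id', 'my-collection-id')
index_fields = discovery.list_collection_fields('my-environment-id', 'my-collection-id')
# And when a collection is no longer needed:
discovery.delete_collection('my-environment-id', 'my-collection-id')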
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'list_collections') - headers.update(sdk_headers) - - params = {'version': self.version, 'name': name} - - url = '/v1/environments/{0}/collections'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def update_collection(self, - environment_id, - collection_id, - name, - description=None, - configuration_id=None, - **kwargs): - """ - Update a collection. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str name: The name of the collection. - :param str description: A description of the collection. - :param str configuration_id: The ID of the configuration in which the collection - is to be updated. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'update_collection') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'name': name, - 'description': description, - 'configuration_id': configuration_id - } - - url = '/v1/environments/{0}/collections/{1}'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='PUT', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - ######################### - # Query modifications - ######################### - - def create_expansions(self, environment_id, collection_id, expansions, - **kwargs): - """ - Create or update expansion list. - - Create or replace the Expansion list for this collection. The maximum number of - expanded terms per collection is `500`. - The current expansion list is replaced with the uploaded content. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param list[Expansion] expansions: An array of query expansion definitions. - Each object in the **expansions** array represents a term or set of terms that - will be expanded into other terms. Each expansion object can be configured as - bidirectional or unidirectional. Bidirectional means that all terms are expanded - to all other terms in the object. Unidirectional means that a set list of terms - can be expanded into a second list of terms. - To create a bi-directional expansion specify an **expanded_terms** array. When - found in a query, all items in the **expanded_terms** array are then expanded to - the other items in the same array. - To create a uni-directional expansion, specify both an array of **input_terms** - and an array of **expanded_terms**. When items in the **input_terms** array are - present in a query, they are expanded using the items listed in the - **expanded_terms** array. 
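# A sketch of listing and renaming collections with the methods above, reusing
# the `discovery` client from the earlier sketch; names and IDs are placeholders.
collections = discovery.list_collections('my-environment-id', name='support-articles')
renamed = discovery.update_collection(
    environment_id='my-environment-id',
    collection_id='my-collection-id',
    name='support-articles-v2',
    description='Renamed after the site migration')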
- :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if expansions is None: - raise ValueError('expansions must be provided') - expansions = [self._convert_model(x, Expansion) for x in expansions] - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'create_expansions') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = {'expansions': expansions} - - url = '/v1/environments/{0}/collections/{1}/expansions'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def create_stopword_list(self, - environment_id, - collection_id, - stopword_file, - stopword_filename=None, - **kwargs): - """ - Create stopword list. - - Upload a custom stopword list to use with the specified collection. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param file stopword_file: The content of the stopword list to ingest. - :param str stopword_filename: The filename for stopword_file. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if stopword_file is None: - raise ValueError('stopword_file must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'create_stopword_list') - headers.update(sdk_headers) - - params = {'version': self.version} - - form_data = {} - if not stopword_filename and hasattr(stopword_file, 'name'): - stopword_filename = basename(stopword_file.name) - if not stopword_filename: - raise ValueError('stopword_filename must be provided') - form_data['stopword_file'] = (stopword_filename, stopword_file, - 'application/octet-stream') - - url = '/v1/environments/{0}/collections/{1}/word_lists/stopwords'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - def create_tokenization_dictionary(self, - environment_id, - collection_id, - tokenization_rules=None, - **kwargs): - """ - Create tokenization dictionary. - - Upload a custom tokenization dictionary to use with the specified collection. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param list[TokenDictRule] tokenization_rules: An array of tokenization rules. - Each rule contains, the original `text` string, component `tokens`, any alternate - character set `readings`, and which `part_of_speech` the text is from. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
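# A sketch of the query-modification uploads above, reusing the `discovery`
# client from the earlier sketch. The expansion definitions are plain dicts
# using the documented input_terms/expanded_terms fields, on the assumption
# that the client converts dicts to its Expansion model; the stopword file
# path is a placeholder.
discovery.create_expansions(
    environment_id='my-environment-id',
    collection_id='my-collection-id',
    expansions=[{'input_terms': ['car'],
                 'expanded_terms': ['automobile', 'vehicle']}])

with open('stopwords.txt', 'rb') as stopwords:
    discovery.create_stopword_list(
        'my-environment-id', 'my-collection-id',
        stopword_file=stopwords, stopword_filename='stopwords.txt')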
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if tokenization_rules is not None: - tokenization_rules = [ - self._convert_model(x, TokenDictRule) - for x in tokenization_rules - ] - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'create_tokenization_dictionary') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = {'tokenization_rules': tokenization_rules} - - url = '/v1/environments/{0}/collections/{1}/word_lists/tokenization_dictionary'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def delete_expansions(self, environment_id, collection_id, **kwargs): - """ - Delete the expansion list. - - Remove the expansion information for this collection. The expansion list must be - deleted to disable query expansion for a collection. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'delete_expansions') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/expansions'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=False) - return response - - def delete_stopword_list(self, environment_id, collection_id, **kwargs): - """ - Delete a custom stopword list. - - Delete a custom stopword list from the collection. After a custom stopword list is - deleted, the default list is used for the collection. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'delete_stopword_list') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/word_lists/stopwords'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=False) - return response - - def delete_tokenization_dictionary(self, environment_id, collection_id, - **kwargs): - """ - Delete tokenization dictionary. - - Delete the tokenization dictionary from the collection. 
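# A sketch of uploading a tokenization dictionary and removing word-list
# customizations with the methods above, reusing the `discovery` client from
# the earlier sketch. The rule is a plain dict with the documented
# text/tokens/readings/part_of_speech fields and is illustrative only.
discovery.create_tokenization_dictionary(
    environment_id='my-environment-id',
    collection_id='my-collection-id',
    tokenization_rules=[{'text': 'すしネコ',
                         'tokens': ['すし', 'ネコ'],
                         'readings': ['寿司', 'ねこ'],
                         'part_of_speech': 'noun'}])

# Customizations can be removed again:
discovery.delete_expansions('my-environment-id', 'my-collection-id')
discovery.delete_stopword_list('my-environment-id', 'my-collection-id')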
- - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'delete_tokenization_dictionary') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/word_lists/tokenization_dictionary'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=False) - return response - - def get_stopword_list_status(self, environment_id, collection_id, **kwargs): - """ - Get stopword list status. - - Returns the current status of the stopword list for the specified collection. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'get_stopword_list_status') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/word_lists/stopwords'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_tokenization_dictionary_status(self, environment_id, collection_id, - **kwargs): - """ - Get tokenization dictionary status. - - Returns the current status of the tokenization dictionary for the specified - collection. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'get_tokenization_dictionary_status') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/word_lists/tokenization_dictionary'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_expansions(self, environment_id, collection_id, **kwargs): - """ - Get the expansion list. - - Returns the current expansion list for the specified collection. 
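# A status-check sketch for the lookups above, reusing the `discovery` client
# from the earlier sketch; get_result() is assumed to return the parsed JSON
# body of each DetailedResponse.
stopword_status = discovery.get_stopword_list_status(
    'my-environment-id', 'my-collection-id')
token_dict_status = discovery.get_tokenization_dictionary_status(
    'my-environment-id', 'my-collection-id')
current_expansions = discovery.list_expansions(
    'my-environment-id', 'my-collection-id')
print(stopword_status.get_result(), token_dict_status.get_result())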
If an expansion - list is not specified, an object with empty expansion arrays is returned. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'list_expansions') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/expansions'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - ######################### - # Documents - ######################### - - def add_document(self, - environment_id, - collection_id, - file=None, - filename=None, - file_content_type=None, - metadata=None, - **kwargs): - """ - Add a document. - - Add a document to a collection with optional metadata. - * The **version** query parameter is still required. - * Returns immediately after the system has accepted the document for processing. - * The user must provide document content, metadata, or both. If the request is - missing both document content and metadata, it is rejected. - * The user can set the **Content-Type** parameter on the **file** part to - indicate the media type of the document. If the **Content-Type** parameter is - missing or is one of the generic media types (for example, - `application/octet-stream`), then the service attempts to automatically detect the - document's media type. - * The following field names are reserved and will be filtered out if present - after normalization: `id`, `score`, `highlight`, and any field with the prefix of: - `_`, `+`, or `-` - * Fields with empty name values after normalization are filtered out before - indexing. - * Fields containing the following characters after normalization are filtered - out before indexing: `#` and `,` - **Note:** Documents can be added with a specific **document_id** by using the - **_/v1/environments/{environment_id}/collections/{collection_id}/documents** - method. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param file file: The content of the document to ingest. The maximum supported - file size when adding a file to a collection is 50 megabytes, the maximum - supported file size when testing a confiruration is 1 megabyte. Files larger than - the supported size are rejected. - :param str filename: The filename for file. - :param str file_content_type: The content type of file. - :param str metadata: If you're using the Data Crawler to upload your documents, - you can test a document against the type of metadata that the Data Crawler might - send. The maximum supported metadata file size is 1 MB. Metadata parts larger than - 1 MB are rejected. - Example: ``` { - \"Creator\": \"Johnny Appleseed\", - \"Subject\": \"Apples\" - } ```. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'add_document') - headers.update(sdk_headers) - - params = {'version': self.version} - - form_data = {} - if file: - if not filename and hasattr(file, 'name'): - filename = basename(file.name) - if not filename: - raise ValueError('filename must be provided') - form_data['file'] = (filename, file, file_content_type or - 'application/octet-stream') - if metadata: - form_data['metadata'] = (None, metadata, 'text/plain') - - url = '/v1/environments/{0}/collections/{1}/documents'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - def delete_document(self, environment_id, collection_id, document_id, - **kwargs): - """ - Delete a document. - - If the given document ID is invalid, or if the document is not found, then the a - success response is returned (HTTP status code `200`) with the status set to - 'deleted'. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str document_id: The ID of the document. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if document_id is None: - raise ValueError('document_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'delete_document') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/documents/{2}'.format( - *self._encode_path_vars(environment_id, collection_id, document_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_document_status(self, environment_id, collection_id, document_id, - **kwargs): - """ - Get document details. - - Fetch status details about a submitted document. **Note:** this operation does not - return the document itself. Instead, it returns only the document's processing - status and any notices (warnings or errors) that were generated when the document - was ingested. Use the query API to retrieve the actual document content. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str document_id: The ID of the document. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
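# An ingestion sketch for add_document() and delete_document() above, reusing
# the `discovery` client from the earlier sketch; the file path, metadata and
# IDs are placeholders (the metadata example mirrors the docstring).
import json

with open('faq.pdf', 'rb') as document:
    added = discovery.add_document(
        environment_id='my-environment-id',
        collection_id='my-collection-id',
        file=document,
        filename='faq.pdf',
        file_content_type='application/pdf',
        metadata=json.dumps({'Creator': 'Johnny Appleseed', 'Subject': 'Apples'}))

# Deletion reports success even if the document ID is unknown:
discovery.delete_document('my-environment-id', 'my-collection-id', 'my-document-id')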
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if document_id is None: - raise ValueError('document_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'get_document_status') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/documents/{2}'.format( - *self._encode_path_vars(environment_id, collection_id, document_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def update_document(self, - environment_id, - collection_id, - document_id, - file=None, - filename=None, - file_content_type=None, - metadata=None, - **kwargs): - """ - Update a document. - - Replace an existing document or add a document with a specified **document_id**. - Starts ingesting a document with optional metadata. - **Note:** When uploading a new document with this method it automatically replaces - any document stored with the same **document_id** if it exists. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str document_id: The ID of the document. - :param file file: The content of the document to ingest. The maximum supported - file size when adding a file to a collection is 50 megabytes, the maximum - supported file size when testing a confiruration is 1 megabyte. Files larger than - the supported size are rejected. - :param str filename: The filename for file. - :param str file_content_type: The content type of file. - :param str metadata: If you're using the Data Crawler to upload your documents, - you can test a document against the type of metadata that the Data Crawler might - send. The maximum supported metadata file size is 1 MB. Metadata parts larger than - 1 MB are rejected. - Example: ``` { - \"Creator\": \"Johnny Appleseed\", - \"Subject\": \"Apples\" - } ```. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if document_id is None: - raise ValueError('document_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'update_document') - headers.update(sdk_headers) - - params = {'version': self.version} - - form_data = {} - if file: - if not filename and hasattr(file, 'name'): - filename = basename(file.name) - if not filename: - raise ValueError('filename must be provided') - form_data['file'] = (filename, file, file_content_type or - 'application/octet-stream') - if metadata: - form_data['metadata'] = (None, metadata, 'text/plain') - - url = '/v1/environments/{0}/collections/{1}/documents/{2}'.format( - *self._encode_path_vars(environment_id, collection_id, document_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - ######################### - # Queries - ######################### - - def federated_query(self, - environment_id, - filter=None, - query=None, - natural_language_query=None, - passages=None, - aggregation=None, - count=None, - return_fields=None, - offset=None, - sort=None, - highlight=None, - passages_fields=None, - passages_count=None, - passages_characters=None, - deduplicate=None, - deduplicate_field=None, - collection_ids=None, - similar=None, - similar_document_ids=None, - similar_fields=None, - bias=None, - logging_opt_out=None, - **kwargs): - """ - Long environment queries. - - Complex queries might be too long for a standard method query. By using this - method, you can construct longer queries. However, these queries may take longer - to complete than the standard method. For details, see the [Discovery service - documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts). - - :param str environment_id: The ID of the environment. - :param str filter: A cacheable query that excludes documents that don't mention - the query content. Filter searches are better for metadata-type searches and for - assessing the concepts in the data set. - :param str query: A query search returns all documents in your data set with full - enrichments and full text, but with the most relevant documents listed first. Use - a query search when you want to find the most relevant search results. You cannot - use **natural_language_query** and **query** at the same time. - :param str natural_language_query: A natural language query that returns relevant - documents by utilizing training data and natural language understanding. You - cannot use **natural_language_query** and **query** at the same time. - :param bool passages: A passages query that returns the most relevant passages - from the results. - :param str aggregation: An aggregation search that returns an exact answer by - combining query search with filters. Useful for applications to build lists, - tables, and time series. For a full list of possible aggregations, see the Query - reference. - :param int count: Number of results to return. - :param str return_fields: A comma-separated list of the portion of the document - hierarchy to return. - :param int offset: The number of query results to skip at the beginning. 
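# A sketch of replacing a document and checking its processing status with the
# methods above, reusing the `discovery` client from the earlier sketch; IDs
# and the file path are placeholders.
with open('faq-v2.pdf', 'rb') as document:
    discovery.update_document(
        environment_id='my-environment-id',
        collection_id='my-collection-id',
        document_id='my-document-id',
        file=document,
        filename='faq-v2.pdf',
        file_content_type='application/pdf')

status = discovery.get_document_status(
    'my-environment-id', 'my-collection-id', 'my-document-id')
# The status response describes processing state and notices, not the document itself.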
For - example, if the total number of results that are returned is 10 and the offset is - 8, it returns the last two results. - :param str sort: A comma-separated list of fields in the document to sort on. You - can optionally specify a sort direction by prefixing the field with `-` for - descending or `+` for ascending. Ascending is the default sort direction if no - prefix is specified. This parameter cannot be used in the same query as the - **bias** parameter. - :param bool highlight: When true, a highlight field is returned for each result - which contains the fields which match the query with `` tags around the - matching query terms. - :param str passages_fields: A comma-separated list of fields that passages are - drawn from. If this parameter not specified, then all top-level fields are - included. - :param int passages_count: The maximum number of passages to return. The search - returns fewer passages if the requested total is not found. The default is `10`. - The maximum is `100`. - :param int passages_characters: The approximate number of characters that any one - passage will have. - :param bool deduplicate: When `true`, and used with a Watson Discovery News - collection, duplicate results (based on the contents of the **title** field) are - removed. Duplicate comparison is limited to the current query only; **offset** is - not considered. This parameter is currently Beta functionality. - :param str deduplicate_field: When specified, duplicate results based on the field - specified are removed from the returned results. Duplicate comparison is limited - to the current query only, **offset** is not considered. This parameter is - currently Beta functionality. - :param str collection_ids: A comma-separated list of collection IDs to be queried - against. Required when querying multiple collections, invalid when performing a - single collection query. - :param bool similar: When `true`, results are returned based on their similarity - to the document IDs specified in the **similar.document_ids** parameter. - :param str similar_document_ids: A comma-separated list of document IDs to find - similar documents. - **Tip:** Include the **natural_language_query** parameter to expand the scope of - the document similarity search with the natural language query. Other query - parameters, such as **filter** and **query**, are subsequently applied and reduce - the scope. - :param str similar_fields: A comma-separated list of field names that are used as - a basis for comparison to identify similar documents. If not specified, the entire - document is used for comparison. - :param str bias: Field which the returned results will be biased against. The - specified field must be either a **date** or **number** format. When a **date** - type field is specified returned results are biased towards field values closer to - the current date. When a **number** type field is specified, returned results are - biased towards higher field values. This parameter cannot be used in the same - query as the **sort** parameter. - :param bool logging_opt_out: If `true`, queries are not stored in the Discovery - **Logs** endpoint. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - - headers = {'X-Watson-Logging-Opt-Out': logging_opt_out} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'federated_query') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'filter': filter, - 'query': query, - 'natural_language_query': natural_language_query, - 'passages': passages, - 'aggregation': aggregation, - 'count': count, - 'return': return_fields, - 'offset': offset, - 'sort': sort, - 'highlight': highlight, - 'passages.fields': passages_fields, - 'passages.count': passages_count, - 'passages.characters': passages_characters, - 'deduplicate': deduplicate, - 'deduplicate.field': deduplicate_field, - 'collection_ids': collection_ids, - 'similar': similar, - 'similar.document_ids': similar_document_ids, - 'similar.fields': similar_fields, - 'bias': bias - } - - url = '/v1/environments/{0}/query'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def federated_query_notices(self, - environment_id, - collection_ids, - filter=None, - query=None, - natural_language_query=None, - aggregation=None, - count=None, - return_fields=None, - offset=None, - sort=None, - highlight=None, - deduplicate_field=None, - similar=None, - similar_document_ids=None, - similar_fields=None, - **kwargs): - """ - Query multiple collection system notices. - - Queries for notices (errors or warnings) that might have been generated by the - system. Notices are generated when ingesting documents and performing relevance - training. See the [Discovery service - documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts) - for more details on the query language. - - :param str environment_id: The ID of the environment. - :param list[str] collection_ids: A comma-separated list of collection IDs to be - queried against. - :param str filter: A cacheable query that excludes documents that don't mention - the query content. Filter searches are better for metadata-type searches and for - assessing the concepts in the data set. - :param str query: A query search returns all documents in your data set with full - enrichments and full text, but with the most relevant documents listed first. Use - a query search when you want to find the most relevant search results. You cannot - use **natural_language_query** and **query** at the same time. - :param str natural_language_query: A natural language query that returns relevant - documents by utilizing training data and natural language understanding. You - cannot use **natural_language_query** and **query** at the same time. - :param str aggregation: An aggregation search that returns an exact answer by - combining query search with filters. Useful for applications to build lists, - tables, and time series. For a full list of possible aggregations, see the Query - reference. - :param int count: Number of results to return. The maximum for the **count** and - **offset** values together in any one query is **10000**. - :param list[str] return_fields: A comma-separated list of the portion of the - document hierarchy to return. - :param int offset: The number of query results to skip at the beginning. 
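# A federated (multi-collection) query sketch for the method above, reusing
# the `discovery` client from the earlier sketch. Note that collection_ids is
# a comma-separated string for this call, as its docstring describes.
results = discovery.federated_query(
    environment_id='my-environment-id',
    collection_ids='collection-id-1,collection-id-2',
    natural_language_query='How do I reset my password?',
    passages=True,
    count=5,
    logging_opt_out=True)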
For - example, if the total number of results that are returned is 10 and the offset is - 8, it returns the last two results. The maximum for the **count** and **offset** - values together in any one query is **10000**. - :param list[str] sort: A comma-separated list of fields in the document to sort - on. You can optionally specify a sort direction by prefixing the field with `-` - for descending or `+` for ascending. Ascending is the default sort direction if no - prefix is specified. - :param bool highlight: When true, a highlight field is returned for each result - which contains the fields which match the query with `` tags around the - matching query terms. - :param str deduplicate_field: When specified, duplicate results based on the field - specified are removed from the returned results. Duplicate comparison is limited - to the current query only, **offset** is not considered. This parameter is - currently Beta functionality. - :param bool similar: When `true`, results are returned based on their similarity - to the document IDs specified in the **similar.document_ids** parameter. - :param list[str] similar_document_ids: A comma-separated list of document IDs to - find similar documents. - **Tip:** Include the **natural_language_query** parameter to expand the scope of - the document similarity search with the natural language query. Other query - parameters, such as **filter** and **query**, are subsequently applied and reduce - the scope. - :param list[str] similar_fields: A comma-separated list of field names that are - used as a basis for comparison to identify similar documents. If not specified, - the entire document is used for comparison. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_ids is None: - raise ValueError('collection_ids must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'federated_query_notices') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'collection_ids': self._convert_list(collection_ids), - 'filter': filter, - 'query': query, - 'natural_language_query': natural_language_query, - 'aggregation': aggregation, - 'count': count, - 'return': self._convert_list(return_fields), - 'offset': offset, - 'sort': self._convert_list(sort), - 'highlight': highlight, - 'deduplicate.field': deduplicate_field, - 'similar': similar, - 'similar.document_ids': self._convert_list(similar_document_ids), - 'similar.fields': self._convert_list(similar_fields) - } - - url = '/v1/environments/{0}/notices'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def query(self, - environment_id, - collection_id, - filter=None, - query=None, - natural_language_query=None, - passages=None, - aggregation=None, - count=None, - return_fields=None, - offset=None, - sort=None, - highlight=None, - passages_fields=None, - passages_count=None, - passages_characters=None, - deduplicate=None, - deduplicate_field=None, - collection_ids=None, - similar=None, - similar_document_ids=None, - similar_fields=None, - bias=None, - logging_opt_out=None, - **kwargs): - """ - Long collection queries. 
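# A sketch of querying notices across collections with the method above,
# reusing the `discovery` client from the earlier sketch; the filter string is
# illustrative only, and collection_ids is a Python list here, which the
# client serializes to a comma-separated query parameter.
notices = discovery.federated_query_notices(
    environment_id='my-environment-id',
    collection_ids=['collection-id-1', 'collection-id-2'],
    filter='notices.severity::error',
    count=10)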
- - Complex queries might be too long for a standard method query. By using this - method, you can construct longer queries. However, these queries may take longer - to complete than the standard method. For details, see the [Discovery service - documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts). - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str filter: A cacheable query that excludes documents that don't mention - the query content. Filter searches are better for metadata-type searches and for - assessing the concepts in the data set. - :param str query: A query search returns all documents in your data set with full - enrichments and full text, but with the most relevant documents listed first. Use - a query search when you want to find the most relevant search results. You cannot - use **natural_language_query** and **query** at the same time. - :param str natural_language_query: A natural language query that returns relevant - documents by utilizing training data and natural language understanding. You - cannot use **natural_language_query** and **query** at the same time. - :param bool passages: A passages query that returns the most relevant passages - from the results. - :param str aggregation: An aggregation search that returns an exact answer by - combining query search with filters. Useful for applications to build lists, - tables, and time series. For a full list of possible aggregations, see the Query - reference. - :param int count: Number of results to return. - :param str return_fields: A comma-separated list of the portion of the document - hierarchy to return. - :param int offset: The number of query results to skip at the beginning. For - example, if the total number of results that are returned is 10 and the offset is - 8, it returns the last two results. - :param str sort: A comma-separated list of fields in the document to sort on. You - can optionally specify a sort direction by prefixing the field with `-` for - descending or `+` for ascending. Ascending is the default sort direction if no - prefix is specified. This parameter cannot be used in the same query as the - **bias** parameter. - :param bool highlight: When true, a highlight field is returned for each result - which contains the fields which match the query with `` tags around the - matching query terms. - :param str passages_fields: A comma-separated list of fields that passages are - drawn from. If this parameter not specified, then all top-level fields are - included. - :param int passages_count: The maximum number of passages to return. The search - returns fewer passages if the requested total is not found. The default is `10`. - The maximum is `100`. - :param int passages_characters: The approximate number of characters that any one - passage will have. - :param bool deduplicate: When `true`, and used with a Watson Discovery News - collection, duplicate results (based on the contents of the **title** field) are - removed. Duplicate comparison is limited to the current query only; **offset** is - not considered. This parameter is currently Beta functionality. - :param str deduplicate_field: When specified, duplicate results based on the field - specified are removed from the returned results. Duplicate comparison is limited - to the current query only, **offset** is not considered. This parameter is - currently Beta functionality. 
- :param str collection_ids: A comma-separated list of collection IDs to be queried - against. Required when querying multiple collections, invalid when performing a - single collection query. - :param bool similar: When `true`, results are returned based on their similarity - to the document IDs specified in the **similar.document_ids** parameter. - :param str similar_document_ids: A comma-separated list of document IDs to find - similar documents. - **Tip:** Include the **natural_language_query** parameter to expand the scope of - the document similarity search with the natural language query. Other query - parameters, such as **filter** and **query**, are subsequently applied and reduce - the scope. - :param str similar_fields: A comma-separated list of field names that are used as - a basis for comparison to identify similar documents. If not specified, the entire - document is used for comparison. - :param str bias: Field which the returned results will be biased against. The - specified field must be either a **date** or **number** format. When a **date** - type field is specified returned results are biased towards field values closer to - the current date. When a **number** type field is specified, returned results are - biased towards higher field values. This parameter cannot be used in the same - query as the **sort** parameter. - :param bool logging_opt_out: If `true`, queries are not stored in the Discovery - **Logs** endpoint. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {'X-Watson-Logging-Opt-Out': logging_opt_out} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'query') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'filter': filter, - 'query': query, - 'natural_language_query': natural_language_query, - 'passages': passages, - 'aggregation': aggregation, - 'count': count, - 'return': return_fields, - 'offset': offset, - 'sort': sort, - 'highlight': highlight, - 'passages.fields': passages_fields, - 'passages.count': passages_count, - 'passages.characters': passages_characters, - 'deduplicate': deduplicate, - 'deduplicate.field': deduplicate_field, - 'collection_ids': collection_ids, - 'similar': similar, - 'similar.document_ids': similar_document_ids, - 'similar.fields': similar_fields, - 'bias': bias - } - - url = '/v1/environments/{0}/collections/{1}/query'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def query_entities(self, - environment_id, - collection_id, - feature=None, - entity=None, - context=None, - count=None, - evidence_count=None, - **kwargs): - """ - Knowledge Graph entity query. - - See the [Knowledge Graph - documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-kg#kg) - for more details. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str feature: The entity query feature to perform. Supported features are - `disambiguate` and `similar_entities`. 
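# A single-collection query sketch for the method above, reusing the
# `discovery` client from the earlier sketch; IDs are placeholders.
results = discovery.query(
    environment_id='my-environment-id',
    collection_id='my-collection-id',
    natural_language_query='How do I reset my password?',
    passages=True,
    passages_count=3,
    highlight=True,
    count=10)
# results is a DetailedResponse whose body typically includes matching_results,
# results and, when requested, passages.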
- :param QueryEntitiesEntity entity: A text string that appears within the entity - text field. - :param QueryEntitiesContext context: Entity text to provide context for the - queried entity and rank based on that association. For example, if you wanted to - query the city of London in England your query would look for `London` with the - context of `England`. - :param int count: The number of results to return. The default is `10`. The - maximum is `1000`. - :param int evidence_count: The number of evidence items to return for each result. - The default is `0`. The maximum number of evidence items per query is 10,000. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if entity is not None: - entity = self._convert_model(entity, QueryEntitiesEntity) - if context is not None: - context = self._convert_model(context, QueryEntitiesContext) - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'query_entities') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'feature': feature, - 'entity': entity, - 'context': context, - 'count': count, - 'evidence_count': evidence_count - } - - url = '/v1/environments/{0}/collections/{1}/query_entities'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def query_notices(self, - environment_id, - collection_id, - filter=None, - query=None, - natural_language_query=None, - passages=None, - aggregation=None, - count=None, - return_fields=None, - offset=None, - sort=None, - highlight=None, - passages_fields=None, - passages_count=None, - passages_characters=None, - deduplicate_field=None, - similar=None, - similar_document_ids=None, - similar_fields=None, - **kwargs): - """ - Query system notices. - - Queries for notices (errors or warnings) that might have been generated by the - system. Notices are generated when ingesting documents and performing relevance - training. See the [Discovery service - documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts) - for more details on the query language. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str filter: A cacheable query that excludes documents that don't mention - the query content. Filter searches are better for metadata-type searches and for - assessing the concepts in the data set. - :param str query: A query search returns all documents in your data set with full - enrichments and full text, but with the most relevant documents listed first. Use - a query search when you want to find the most relevant search results. You cannot - use **natural_language_query** and **query** at the same time. - :param str natural_language_query: A natural language query that returns relevant - documents by utilizing training data and natural language understanding. You - cannot use **natural_language_query** and **query** at the same time. 
- :param bool passages: A passages query that returns the most relevant passages - from the results. - :param str aggregation: An aggregation search that returns an exact answer by - combining query search with filters. Useful for applications to build lists, - tables, and time series. For a full list of possible aggregations, see the Query - reference. - :param int count: Number of results to return. The maximum for the **count** and - **offset** values together in any one query is **10000**. - :param list[str] return_fields: A comma-separated list of the portion of the - document hierarchy to return. - :param int offset: The number of query results to skip at the beginning. For - example, if the total number of results that are returned is 10 and the offset is - 8, it returns the last two results. The maximum for the **count** and **offset** - values together in any one query is **10000**. - :param list[str] sort: A comma-separated list of fields in the document to sort - on. You can optionally specify a sort direction by prefixing the field with `-` - for descending or `+` for ascending. Ascending is the default sort direction if no - prefix is specified. - :param bool highlight: When true, a highlight field is returned for each result - which contains the fields which match the query with `<em></em>` tags around the - matching query terms. - :param list[str] passages_fields: A comma-separated list of fields that passages - are drawn from. If this parameter is not specified, then all top-level fields are - included. - :param int passages_count: The maximum number of passages to return. The search - returns fewer passages if the requested total is not found. - :param int passages_characters: The approximate number of characters that any one - passage will have. - :param str deduplicate_field: When specified, duplicate results based on the field - specified are removed from the returned results. Duplicate comparison is limited - to the current query only, **offset** is not considered. This parameter is - currently Beta functionality. - :param bool similar: When `true`, results are returned based on their similarity - to the document IDs specified in the **similar.document_ids** parameter. - :param list[str] similar_document_ids: A comma-separated list of document IDs to - find similar documents. - **Tip:** Include the **natural_language_query** parameter to expand the scope of - the document similarity search with the natural language query. Other query - parameters, such as **filter** and **query**, are subsequently applied and reduce - the scope. - :param list[str] similar_fields: A comma-separated list of field names that are - used as a basis for comparison to identify similar documents. If not specified, - the entire document is used for comparison. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'query_notices') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'filter': filter, - 'query': query, - 'natural_language_query': natural_language_query, - 'passages': passages, - 'aggregation': aggregation, - 'count': count, - 'return': self._convert_list(return_fields), - 'offset': offset, - 'sort': self._convert_list(sort), - 'highlight': highlight, - 'passages.fields': self._convert_list(passages_fields), - 'passages.count': passages_count, - 'passages.characters': passages_characters, - 'deduplicate.field': deduplicate_field, - 'similar': similar, - 'similar.document_ids': self._convert_list(similar_document_ids), - 'similar.fields': self._convert_list(similar_fields) - } - - url = '/v1/environments/{0}/collections/{1}/notices'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def query_relations(self, - environment_id, - collection_id, - entities=None, - context=None, - sort=None, - filter=None, - count=None, - evidence_count=None, - **kwargs): - """ - Knowledge Graph relationship query. - - See the [Knowledge Graph - documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-kg#kg) - for more details. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param list[QueryRelationsEntity] entities: An array of entities to find - relationships for. - :param QueryEntitiesContext context: Entity text to provide context for the - queried entity and rank based on that association. For example, if you wanted to - query the city of London in England your query would look for `London` with the - context of `England`. - :param str sort: The sorting method for the relationships, can be `score` or - `frequency`. `frequency` is the number of unique times each entity is identified. - The default is `score`. This parameter cannot be used in the same query as the - **bias** parameter. - :param QueryRelationsFilter filter: - :param int count: The number of results to return. The default is `10`. The - maximum is `1000`. - :param int evidence_count: The number of evidence items to return for each result. - The default is `0`. The maximum number of evidence items per query is 10,000. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
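The `query_notices` method whose implementation appears just above issues a GET against the collection's notices endpoint; a short sketch follows. All IDs, the version string, the authentication keyword, and the `severity:error` query string are illustrative placeholders.

```python
from ibm_watson import DiscoveryV1

# Hypothetical setup; adjust authentication to your SDK release.
discovery = DiscoveryV1(version='2019-04-30', iam_apikey='{your_apikey}')

# Look for ingestion or relevance-training warnings/errors recorded
# for the collection.
response = discovery.query_notices(
    '{environment_id}',
    '{collection_id}',
    query='severity:error',
    count=10)
print(response.get_result())
```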
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if entities is not None: - entities = [ - self._convert_model(x, QueryRelationsEntity) for x in entities - ] - if context is not None: - context = self._convert_model(context, QueryEntitiesContext) - if filter is not None: - filter = self._convert_model(filter, QueryRelationsFilter) - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'query_relations') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'entities': entities, - 'context': context, - 'sort': sort, - 'filter': filter, - 'count': count, - 'evidence_count': evidence_count - } - - url = '/v1/environments/{0}/collections/{1}/query_relations'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - ######################### - # Training data - ######################### - - def add_training_data(self, - environment_id, - collection_id, - natural_language_query=None, - filter=None, - examples=None, - **kwargs): - """ - Add query to training data. - - Adds a query to the training data for this collection. The query can contain a - filter and natural language query. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str natural_language_query: The natural text query for the new training - query. - :param str filter: The filter used on the collection before the - **natural_language_query** is applied. - :param list[TrainingExample] examples: Array of training examples. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if examples is not None: - examples = [ - self._convert_model(x, TrainingExample) for x in examples - ] - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'add_training_data') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'natural_language_query': natural_language_query, - 'filter': filter, - 'examples': examples - } - - url = '/v1/environments/{0}/collections/{1}/training_data'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def create_training_example(self, - environment_id, - collection_id, - query_id, - document_id=None, - cross_reference=None, - relevance=None, - **kwargs): - """ - Add example to training data query. - - Adds an example to this training data query. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str query_id: The ID of the query used for training. - :param str document_id: The document ID associated with this training example. - :param str cross_reference: The cross reference associated with this training - example.
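Relevance training is easiest to see end to end, so here is a hedged sketch of the `add_training_data` method removed above. The example dictionaries mirror the `TrainingExample` fields used by `create_training_example`; all IDs, the filter string, and the relevance values are invented for illustration.

```python
from ibm_watson import DiscoveryV1

# Hypothetical setup; adjust authentication to your SDK release.
discovery = DiscoveryV1(version='2019-04-30', iam_apikey='{your_apikey}')

# Each example may be a TrainingExample model or a plain dict with the same
# keys; plain dicts typically pass through _convert_model unchanged.
response = discovery.add_training_data(
    '{environment_id}',
    '{collection_id}',
    natural_language_query='how do I reset my password',
    filter='doc_type:faq',
    examples=[
        {'document_id': '{relevant_document_id}', 'relevance': 10},
        {'document_id': '{irrelevant_document_id}', 'relevance': 0},
    ])
print(response.get_result())
```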
- :param int relevance: The relevance of the training example. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if query_id is None: - raise ValueError('query_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'create_training_example') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'document_id': document_id, - 'cross_reference': cross_reference, - 'relevance': relevance - } - - url = '/v1/environments/{0}/collections/{1}/training_data/{2}/examples'.format( - *self._encode_path_vars(environment_id, collection_id, query_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def delete_all_training_data(self, environment_id, collection_id, **kwargs): - """ - Delete all training data. - - Deletes all training data from a collection. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'delete_all_training_data') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/training_data'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=False) - return response - - def delete_training_data(self, environment_id, collection_id, query_id, - **kwargs): - """ - Delete a training data query. - - Removes the training data query and all associated examples from the training data - set. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str query_id: The ID of the query used for training. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if query_id is None: - raise ValueError('query_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'delete_training_data') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/training_data/{2}'.format( - *self._encode_path_vars(environment_id, collection_id, query_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=False) - return response - - def delete_training_example(self, environment_id, collection_id, query_id, - example_id, **kwargs): - """ - Delete example for training data query. - - Deletes the example document with the given ID from the training data query. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str query_id: The ID of the query used for training. - :param str example_id: The ID of the document as it is indexed. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if query_id is None: - raise ValueError('query_id must be provided') - if example_id is None: - raise ValueError('example_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'delete_training_example') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/training_data/{2}/examples/{3}'.format( - *self._encode_path_vars(environment_id, collection_id, query_id, - example_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=False) - return response - - def get_training_data(self, environment_id, collection_id, query_id, - **kwargs): - """ - Get details about a query. - - Gets details for a specific training data query, including the query string and - all examples. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str query_id: The ID of the query used for training. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if query_id is None: - raise ValueError('query_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'get_training_data') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/training_data/{2}'.format( - *self._encode_path_vars(environment_id, collection_id, query_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_training_example(self, environment_id, collection_id, query_id, - example_id, **kwargs): - """ - Get details for training data example. - - Gets the details for this training example. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str query_id: The ID of the query used for training. - :param str example_id: The ID of the document as it is indexed. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if query_id is None: - raise ValueError('query_id must be provided') - if example_id is None: - raise ValueError('example_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'get_training_example') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/training_data/{2}/examples/{3}'.format( - *self._encode_path_vars(environment_id, collection_id, query_id, - example_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_training_data(self, environment_id, collection_id, **kwargs): - """ - List training data. - - Lists the training data for the specified collection. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'list_training_data') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/training_data'.format( - *self._encode_path_vars(environment_id, collection_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_training_examples(self, environment_id, collection_id, query_id, - **kwargs): - """ - List examples for a training data query. - - List all examples for this training data query. 
- - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str query_id: The ID of the query used for training. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if query_id is None: - raise ValueError('query_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'list_training_examples') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/collections/{1}/training_data/{2}/examples'.format( - *self._encode_path_vars(environment_id, collection_id, query_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def update_training_example(self, - environment_id, - collection_id, - query_id, - example_id, - cross_reference=None, - relevance=None, - **kwargs): - """ - Change label or cross reference for example. - - Changes the label or cross reference query for this training data example. - - :param str environment_id: The ID of the environment. - :param str collection_id: The ID of the collection. - :param str query_id: The ID of the query used for training. - :param str example_id: The ID of the document as it is indexed. - :param str cross_reference: The example to add. - :param int relevance: The relevance value for this example. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if collection_id is None: - raise ValueError('collection_id must be provided') - if query_id is None: - raise ValueError('query_id must be provided') - if example_id is None: - raise ValueError('example_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'update_training_example') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = {'cross_reference': cross_reference, 'relevance': relevance} - - url = '/v1/environments/{0}/collections/{1}/training_data/{2}/examples/{3}'.format( - *self._encode_path_vars(environment_id, collection_id, query_id, - example_id)) - response = self.request( - method='PUT', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - ######################### - # User data - ######################### - - def delete_user_data(self, customer_id, **kwargs): - """ - Delete labeled data. - - Deletes all data associated with a specified customer ID. The method has no effect - if no data is associated with the customer ID. - You associate a customer ID with data by passing the **X-Watson-Metadata** header - with a request that passes data. For more information about personal data and - customer IDs, see [Information - security](https://cloud.ibm.com/docs/services/discovery?topic=discovery-information-security#information-security). - - :param str customer_id: The customer ID for which all data is to be deleted. 
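A quick sketch of `update_training_example`, which issues the PUT shown above to re-label an existing example; every ID is a placeholder.

```python
from ibm_watson import DiscoveryV1

# Hypothetical setup; adjust authentication to your SDK release.
discovery = DiscoveryV1(version='2019-04-30', iam_apikey='{your_apikey}')

# Change the relevance score recorded for one training example.
response = discovery.update_training_example(
    '{environment_id}',
    '{collection_id}',
    '{query_id}',
    '{example_id}',
    relevance=8)
print(response.get_result())
```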
- :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if customer_id is None: - raise ValueError('customer_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'delete_user_data') - headers.update(sdk_headers) - - params = {'version': self.version, 'customer_id': customer_id} - - url = '/v1/user_data' - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=False) - return response - - ######################### - # Events and feedback - ######################### - - def create_event(self, type, data, **kwargs): - """ - Create event. - - The **Events** API can be used to create log entries that are associated with - specific queries. For example, you can record which documents in the results set - were \"clicked\" by a user and when that click occurred. - - :param str type: The event type to be created. - :param EventData data: Query event data object. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if type is None: - raise ValueError('type must be provided') - if data is None: - raise ValueError('data must be provided') - data = self._convert_model(data, EventData) - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'create_event') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = {'type': type, 'data': data} - - url = '/v1/events' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def get_metrics_event_rate(self, - start_time=None, - end_time=None, - result_type=None, - **kwargs): - """ - Percentage of queries with an associated event. - - The percentage of queries using the **natural_language_query** parameter that have - a corresponding \"click\" event over a specified time window. This metric - requires having integrated event tracking in your application using the **Events** - API. - - :param datetime start_time: Metric is computed from data recorded after this - timestamp; must be in `YYYY-MM-DDThh:mm:ssZ` format. - :param datetime end_time: Metric is computed from data recorded before this - timestamp; must be in `YYYY-MM-DDThh:mm:ssZ` format. - :param str result_type: The type of result to consider when calculating the - metric. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'get_metrics_event_rate') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'start_time': start_time, - 'end_time': end_time, - 'result_type': result_type - } - - url = '/v1/metrics/event_rate' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_metrics_query(self, - start_time=None, - end_time=None, - result_type=None, - **kwargs): - """ - Number of queries over time.
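The **Events** API pairs user clicks with earlier queries; below is a hedged sketch of the `create_event` method shown above. The keys in the data dict stand in for an `EventData` model defined elsewhere in this file, so treat the field names as assumptions and every value as a placeholder.

```python
from ibm_watson import DiscoveryV1

# Hypothetical setup; adjust authentication to your SDK release.
discovery = DiscoveryV1(version='2019-04-30', iam_apikey='{your_apikey}')

# 'click' is the event type documented for this endpoint; the dict stands in
# for an EventData model (field names assumed, values are placeholders).
response = discovery.create_event(
    'click',
    {
        'environment_id': '{environment_id}',
        'collection_id': '{collection_id}',
        'document_id': '{document_id}',
        'session_token': '{session_token}',
    })
print(response.get_result())
```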
- - Total number of queries using the **natural_language_query** parameter over a - specific time window. - - :param datetime start_time: Metric is computed from data recorded after this - timestamp; must be in `YYYY-MM-DDThh:mm:ssZ` format. - :param datetime end_time: Metric is computed from data recorded before this - timestamp; must be in `YYYY-MM-DDThh:mm:ssZ` format. - :param str result_type: The type of result to consider when calculating the - metric. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'get_metrics_query') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'start_time': start_time, - 'end_time': end_time, - 'result_type': result_type - } - - url = '/v1/metrics/number_of_queries' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_metrics_query_event(self, - start_time=None, - end_time=None, - result_type=None, - **kwargs): - """ - Number of queries with an event over time. - - Total number of queries using the **natural_language_query** parameter that have a - corresponding \"click\" event over a specified time window. This metric requires - having integrated event tracking in your application using the **Events** API. - - :param datetime start_time: Metric is computed from data recorded after this - timestamp; must be in `YYYY-MM-DDThh:mm:ssZ` format. - :param datetime end_time: Metric is computed from data recorded before this - timestamp; must be in `YYYY-MM-DDThh:mm:ssZ` format. - :param str result_type: The type of result to consider when calculating the - metric. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'get_metrics_query_event') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'start_time': start_time, - 'end_time': end_time, - 'result_type': result_type - } - - url = '/v1/metrics/number_of_queries_with_event' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_metrics_query_no_results(self, - start_time=None, - end_time=None, - result_type=None, - **kwargs): - """ - Number of queries with no search results over time. - - Total number of queries using the **natural_language_query** parameter that have - no results returned over a specified time window. - - :param datetime start_time: Metric is computed from data recorded after this - timestamp; must be in `YYYY-MM-DDThh:mm:ssZ` format. - :param datetime end_time: Metric is computed from data recorded before this - timestamp; must be in `YYYY-MM-DDThh:mm:ssZ` format. - :param str result_type: The type of result to consider when calculating the - metric. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
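The metrics endpoints above all take an optional time window; a small sketch of `get_metrics_query` follows. The timestamps simply follow the `YYYY-MM-DDThh:mm:ssZ` format called out in the docstring, and `result_type='document'` is an illustrative value.

```python
from ibm_watson import DiscoveryV1

# Hypothetical setup; adjust authentication to your SDK release.
discovery = DiscoveryV1(version='2019-04-30', iam_apikey='{your_apikey}')

# Count natural_language_query traffic for the first week of July 2019.
response = discovery.get_metrics_query(
    start_time='2019-07-01T00:00:00Z',
    end_time='2019-07-08T00:00:00Z',
    result_type='document')
print(response.get_result())
```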
- :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'get_metrics_query_no_results') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'start_time': start_time, - 'end_time': end_time, - 'result_type': result_type - } - - url = '/v1/metrics/number_of_queries_with_no_search_results' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_metrics_query_token_event(self, count=None, **kwargs): - """ - Most frequent query tokens with an event. - - The most frequent query tokens parsed from the **natural_language_query** - parameter and their corresponding \"click\" event rate within the recording period - (queries and events are stored for 30 days). A query token is an individual word - or unigram within the query string. - - :param int count: Number of results to return. The maximum for the **count** and - **offset** values together in any one query is **10000**. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', - 'get_metrics_query_token_event') - headers.update(sdk_headers) - - params = {'version': self.version, 'count': count} - - url = '/v1/metrics/top_query_tokens_with_event_rate' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def query_log(self, - filter=None, - query=None, - count=None, - offset=None, - sort=None, - **kwargs): - """ - Search the query and event log. - - Searches the query and event log to find query sessions that match the specified - criteria. Searching the **logs** endpoint uses the standard Discovery query syntax - for the parameters that are supported. - - :param str filter: A cacheable query that excludes documents that don't mention - the query content. Filter searches are better for metadata-type searches and for - assessing the concepts in the data set. - :param str query: A query search returns all documents in your data set with full - enrichments and full text, but with the most relevant documents listed first. Use - a query search when you want to find the most relevant search results. You cannot - use **natural_language_query** and **query** at the same time. - :param int count: Number of results to return. The maximum for the **count** and - **offset** values together in any one query is **10000**. - :param int offset: The number of query results to skip at the beginning. For - example, if the total number of results that are returned is 10 and the offset is - 8, it returns the last two results. The maximum for the **count** and **offset** - values together in any one query is **10000**. - :param list[str] sort: A comma-separated list of fields in the document to sort - on. You can optionally specify a sort direction by prefixing the field with `-` - for descending or `+` for ascending. Ascending is the default sort direction if no - prefix is specified. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'query_log') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'filter': filter, - 'query': query, - 'count': count, - 'offset': offset, - 'sort': self._convert_list(sort) - } - - url = '/v1/logs' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - ######################### - # Credentials - ######################### - - def create_credentials(self, - environment_id, - source_type=None, - credential_details=None, - **kwargs): - """ - Create credentials. - - Creates a set of credentials to connect to a remote source. Created credentials - are used in a configuration to associate a collection with the remote source. - **Note:** All credentials are sent over an encrypted connection and encrypted at - rest. - - :param str environment_id: The ID of the environment. - :param str source_type: The source that this credentials object connects to. - - `box` indicates the credentials are used to connect an instance of Enterprise - Box. - - `salesforce` indicates the credentials are used to connect to Salesforce. - - `sharepoint` indicates the credentials are used to connect to Microsoft - SharePoint Online. - - `web_crawl` indicates the credentials are used to perform a web crawl. - - `cloud_object_storage` indicates the credentials are used to connect to an IBM - Cloud Object Store. - :param CredentialDetails credential_details: Object containing details of the - stored credentials. - Obtain credentials for your source from the administrator of the source. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if credential_details is not None: - credential_details = self._convert_model(credential_details, - CredentialDetails) - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'create_credentials') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'source_type': source_type, - 'credential_details': credential_details - } - - url = '/v1/environments/{0}/credentials'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def delete_credentials(self, environment_id, credential_id, **kwargs): - """ - Delete credentials. - - Deletes a set of stored credentials from your Discovery instance. - - :param str environment_id: The ID of the environment. - :param str credential_id: The unique identifier for a set of source credentials. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code.
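Connecting a collection to a remote source starts with storing credentials; a hedged sketch of `create_credentials` (implemented just above) follows. The `credential_details` keys shown here (`credential_type`, `url`, `username`, `password`) are assumptions drawn from the `CredentialDetails` model defined later in this file, and every value is a placeholder.

```python
from ibm_watson import DiscoveryV1

# Hypothetical setup; adjust authentication to your SDK release.
discovery = DiscoveryV1(version='2019-04-30', iam_apikey='{your_apikey}')

# credential_details may be a CredentialDetails model or a plain dict;
# the keys below are assumed field names and the values are placeholders.
response = discovery.create_credentials(
    '{environment_id}',
    source_type='salesforce',
    credential_details={
        'credential_type': 'username_password',
        'url': 'https://login.salesforce.com',
        'username': '{username}',
        'password': '{password}',
    })
print(response.get_result())
```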
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if credential_id is None: - raise ValueError('credential_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'delete_credentials') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/credentials/{1}'.format( - *self._encode_path_vars(environment_id, credential_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_credentials(self, environment_id, credential_id, **kwargs): - """ - View Credentials. - - Returns details about the specified credentials. - **Note:** Secure credential information such as a password or SSH key is never - returned and must be obtained from the source system. - - :param str environment_id: The ID of the environment. - :param str credential_id: The unique identifier for a set of source credentials. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if credential_id is None: - raise ValueError('credential_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'get_credentials') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/credentials/{1}'.format( - *self._encode_path_vars(environment_id, credential_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_credentials(self, environment_id, **kwargs): - """ - List credentials. - - List all the source credentials that have been created for this service instance. - **Note:** All credentials are sent over an encrypted connection and encrypted at - rest. - - :param str environment_id: The ID of the environment. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'list_credentials') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/credentials'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def update_credentials(self, - environment_id, - credential_id, - source_type=None, - credential_details=None, - **kwargs): - """ - Update credentials. - - Updates an existing set of source credentials. - **Note:** All credentials are sent over an encrypted connection and encrypted at - rest. - - :param str environment_id: The ID of the environment. - :param str credential_id: The unique identifier for a set of source credentials. - :param str source_type: The source that this credentials object connects to. - - `box` indicates the credentials are used to connect an instance of Enterprise - Box. 
- - `salesforce` indicates the credentials are used to connect to Salesforce. - - `sharepoint` indicates the credentials are used to connect to Microsoft - SharePoint Online. - - `web_crawl` indicates the credentials are used to perform a web crawl. - - `cloud_object_storage` indicates the credentials are used to connect to an IBM - Cloud Object Store. - :param CredentialDetails credential_details: Object containing details of the - stored credentials. - Obtain credentials for your source from the administrator of the source. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if credential_id is None: - raise ValueError('credential_id must be provided') - if credential_details is not None: - credential_details = self._convert_model(credential_details, - CredentialDetails) - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'update_credentials') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'source_type': source_type, - 'credential_details': credential_details - } - - url = '/v1/environments/{0}/credentials/{1}'.format( - *self._encode_path_vars(environment_id, credential_id)) - response = self.request( - method='PUT', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - ######################### - # gatewayConfiguration - ######################### - - def create_gateway(self, environment_id, name=None, **kwargs): - """ - Create Gateway. - - Create a gateway configuration to use with a remotely installed gateway. - - :param str environment_id: The ID of the environment. - :param str name: User-defined name. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'create_gateway') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = {'name': name} - - url = '/v1/environments/{0}/gateways'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - def delete_gateway(self, environment_id, gateway_id, **kwargs): - """ - Delete Gateway. - - Delete the specified gateway configuration. - - :param str environment_id: The ID of the environment. - :param str gateway_id: The requested gateway ID. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code.
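Gateways support remotely installed connectors; the `create_gateway` method above needs only an environment ID and an optional name, as in this sketch. The gateway name and the `gateway_id` key in the response are illustrative assumptions.

```python
from ibm_watson import DiscoveryV1

# Hypothetical setup; adjust authentication to your SDK release.
discovery = DiscoveryV1(version='2019-04-30', iam_apikey='{your_apikey}')

response = discovery.create_gateway(
    '{environment_id}',
    name='my-on-prem-gateway')
gateway = response.get_result()
print(gateway)

# The gateway can later be removed with delete_gateway, passing the ID from
# the response (key name assumed to be 'gateway_id').
# discovery.delete_gateway('{environment_id}', gateway['gateway_id'])
```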
- :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if gateway_id is None: - raise ValueError('gateway_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'delete_gateway') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/gateways/{1}'.format( - *self._encode_path_vars(environment_id, gateway_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_gateway(self, environment_id, gateway_id, **kwargs): - """ - List Gateway Details. - - List information about the specified gateway. - - :param str environment_id: The ID of the environment. - :param str gateway_id: The requested gateway ID. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - if gateway_id is None: - raise ValueError('gateway_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'get_gateway') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/gateways/{1}'.format( - *self._encode_path_vars(environment_id, gateway_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_gateways(self, environment_id, **kwargs): - """ - List Gateways. - - List the currently configured gateways. - - :param str environment_id: The ID of the environment. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if environment_id is None: - raise ValueError('environment_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('discovery', 'V1', 'list_gateways') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v1/environments/{0}/gateways'.format( - *self._encode_path_vars(environment_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - -############################################################################## -# Models -############################################################################## - - -class AggregationResult(object): - """ - AggregationResult. - - :attr str key: (optional) Key that matched the aggregation type. - :attr int matching_results: (optional) Number of matching results. - :attr list[QueryAggregation] aggregations: (optional) Aggregations returned in the - case of chained aggregations. - """ - - def __init__(self, key=None, matching_results=None, aggregations=None): - """ - Initialize a AggregationResult object. - - :param str key: (optional) Key that matched the aggregation type. - :param int matching_results: (optional) Number of matching results. - :param list[QueryAggregation] aggregations: (optional) Aggregations returned in - the case of chained aggregations. 
- """ - self.key = key - self.matching_results = matching_results - self.aggregations = aggregations - - @classmethod - def _from_dict(cls, _dict): - """Initialize a AggregationResult object from a json dictionary.""" - args = {} - if 'key' in _dict: - args['key'] = _dict.get('key') - if 'matching_results' in _dict: - args['matching_results'] = _dict.get('matching_results') - if 'aggregations' in _dict: - args['aggregations'] = [ - QueryAggregation._from_dict(x) - for x in (_dict.get('aggregations')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'key') and self.key is not None: - _dict['key'] = self.key - if hasattr(self, - 'matching_results') and self.matching_results is not None: - _dict['matching_results'] = self.matching_results - if hasattr(self, 'aggregations') and self.aggregations is not None: - _dict['aggregations'] = [x._to_dict() for x in self.aggregations] - return _dict - - def __str__(self): - """Return a `str` version of this AggregationResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Calculation(object): - """ - Calculation. - - :attr str field: (optional) The field where the aggregation is located in the - document. - :attr float value: (optional) Value of the aggregation. - """ - - def __init__(self, - type=None, - results=None, - matching_results=None, - aggregations=None, - field=None, - value=None): - """ - Initialize a Calculation object. - - :param str type: (optional) The type of aggregation command used. For example: - term, filter, max, min, etc. - :param list[AggregationResult] results: (optional) Array of aggregation results. - :param int matching_results: (optional) Number of matching results. - :param list[QueryAggregation] aggregations: (optional) Aggregations returned by - the Discovery service. - :param str field: (optional) The field where the aggregation is located in the - document. - :param float value: (optional) Value of the aggregation. - """ - self.field = field - self.value = value - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Calculation object from a json dictionary.""" - args = {} - if 'field' in _dict: - args['field'] = _dict.get('field') - if 'value' in _dict: - args['value'] = _dict.get('value') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'field') and self.field is not None: - _dict['field'] = self.field - if hasattr(self, 'value') and self.value is not None: - _dict['value'] = self.value - return _dict - - def __str__(self): - """Return a `str` version of this Calculation object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Collection(object): - """ - A collection for storing documents. 
- - :attr str collection_id: (optional) The unique identifier of the collection. - :attr str name: (optional) The name of the collection. - :attr str description: (optional) The description of the collection. - :attr datetime created: (optional) The creation date of the collection in the format - yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. - :attr datetime updated: (optional) The timestamp of when the collection was last - updated in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. - :attr str status: (optional) The status of the collection. - :attr str configuration_id: (optional) The unique identifier of the collection's - configuration. - :attr str language: (optional) The language of the documents stored in the collection. - Permitted values include `en` (English), `de` (German), and `es` (Spanish). - :attr DocumentCounts document_counts: (optional) - :attr CollectionDiskUsage disk_usage: (optional) Summary of the disk usage statistics - for this collection. - :attr TrainingStatus training_status: (optional) - :attr SourceStatus source_crawl: (optional) Object containing source crawl status - information. - """ - - def __init__(self, - collection_id=None, - name=None, - description=None, - created=None, - updated=None, - status=None, - configuration_id=None, - language=None, - document_counts=None, - disk_usage=None, - training_status=None, - source_crawl=None): - """ - Initialize a Collection object. - - :param str collection_id: (optional) The unique identifier of the collection. - :param str name: (optional) The name of the collection. - :param str description: (optional) The description of the collection. - :param datetime created: (optional) The creation date of the collection in the - format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. - :param datetime updated: (optional) The timestamp of when the collection was last - updated in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. - :param str status: (optional) The status of the collection. - :param str configuration_id: (optional) The unique identifier of the collection's - configuration. - :param str language: (optional) The language of the documents stored in the - collection. Permitted values include `en` (English), `de` (German), and `es` - (Spanish). - :param DocumentCounts document_counts: (optional) - :param CollectionDiskUsage disk_usage: (optional) Summary of the disk usage - statistics for this collection. - :param TrainingStatus training_status: (optional) - :param SourceStatus source_crawl: (optional) Object containing source crawl status - information.
- """ - self.collection_id = collection_id - self.name = name - self.description = description - self.created = created - self.updated = updated - self.status = status - self.configuration_id = configuration_id - self.language = language - self.document_counts = document_counts - self.disk_usage = disk_usage - self.training_status = training_status - self.source_crawl = source_crawl - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Collection object from a json dictionary.""" - args = {} - if 'collection_id' in _dict: - args['collection_id'] = _dict.get('collection_id') - if 'name' in _dict: - args['name'] = _dict.get('name') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'configuration_id' in _dict: - args['configuration_id'] = _dict.get('configuration_id') - if 'language' in _dict: - args['language'] = _dict.get('language') - if 'document_counts' in _dict: - args['document_counts'] = DocumentCounts._from_dict( - _dict.get('document_counts')) - if 'disk_usage' in _dict: - args['disk_usage'] = CollectionDiskUsage._from_dict( - _dict.get('disk_usage')) - if 'training_status' in _dict: - args['training_status'] = TrainingStatus._from_dict( - _dict.get('training_status')) - if 'source_crawl' in _dict: - args['source_crawl'] = SourceStatus._from_dict( - _dict.get('source_crawl')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'collection_id') and self.collection_id is not None: - _dict['collection_id'] = self.collection_id - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, - 'configuration_id') and self.configuration_id is not None: - _dict['configuration_id'] = self.configuration_id - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language - if hasattr(self, - 'document_counts') and self.document_counts is not None: - _dict['document_counts'] = self.document_counts._to_dict() - if hasattr(self, 'disk_usage') and self.disk_usage is not None: - _dict['disk_usage'] = self.disk_usage._to_dict() - if hasattr(self, - 'training_status') and self.training_status is not None: - _dict['training_status'] = self.training_status._to_dict() - if hasattr(self, 'source_crawl') and self.source_crawl is not None: - _dict['source_crawl'] = self.source_crawl._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this Collection object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - 
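The model classes removed in this file all follow the same `_from_dict` / `_to_dict` pattern that the service uses internally to (de)serialize API payloads. A self-contained round-trip sketch using the `Collection` class just shown; the import path assumes the SDK release that still ships this module, the field values are placeholders, and the underscore-prefixed helpers are internal methods used here only for illustration.

```python
import json

from ibm_watson.discovery_v1 import Collection

# Keys mirror the names handled in Collection._from_dict above; nested models
# and timestamps are omitted to keep the round trip minimal.
raw = {
    'collection_id': '{collection_id}',
    'name': 'example-collection',
    'description': 'A sample collection',
    'status': 'active',
    'language': 'en',
}

collection = Collection._from_dict(raw)
print(collection.name)                              # example-collection
print(json.dumps(collection._to_dict(), indent=2))  # same keys serialized back
```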
-class CollectionDiskUsage(object): - """ - Summary of the disk usage statistics for this collection. - - :attr int used_bytes: (optional) Number of bytes used by the collection. - """ - - def __init__(self, used_bytes=None): - """ - Initialize a CollectionDiskUsage object. - - :param int used_bytes: (optional) Number of bytes used by the collection. - """ - self.used_bytes = used_bytes - - @classmethod - def _from_dict(cls, _dict): - """Initialize a CollectionDiskUsage object from a json dictionary.""" - args = {} - if 'used_bytes' in _dict: - args['used_bytes'] = _dict.get('used_bytes') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'used_bytes') and self.used_bytes is not None: - _dict['used_bytes'] = self.used_bytes - return _dict - - def __str__(self): - """Return a `str` version of this CollectionDiskUsage object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class CollectionUsage(object): - """ - Summary of the collection usage in the environment. - - :attr int available: (optional) Number of active collections in the environment. - :attr int maximum_allowed: (optional) Total number of collections allowed in the - environment. - """ - - def __init__(self, available=None, maximum_allowed=None): - """ - Initialize a CollectionUsage object. - - :param int available: (optional) Number of active collections in the environment. - :param int maximum_allowed: (optional) Total number of collections allowed in the - environment. - """ - self.available = available - self.maximum_allowed = maximum_allowed - - @classmethod - def _from_dict(cls, _dict): - """Initialize a CollectionUsage object from a json dictionary.""" - args = {} - if 'available' in _dict: - args['available'] = _dict.get('available') - if 'maximum_allowed' in _dict: - args['maximum_allowed'] = _dict.get('maximum_allowed') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'available') and self.available is not None: - _dict['available'] = self.available - if hasattr(self, - 'maximum_allowed') and self.maximum_allowed is not None: - _dict['maximum_allowed'] = self.maximum_allowed - return _dict - - def __str__(self): - """Return a `str` version of this CollectionUsage object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Configuration(object): - """ - A custom configuration for the environment. - - :attr str configuration_id: (optional) The unique identifier of the configuration. - :attr str name: The name of the configuration. - :attr datetime created: (optional) The creation date of the configuration in the - format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. - :attr datetime updated: (optional) The timestamp of when the configuration was last - updated in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. 
- :attr str description: (optional) The description of the configuration, if available. - :attr Conversions conversions: (optional) Document conversion settings. - :attr list[Enrichment] enrichments: (optional) An array of document enrichment - settings for the configuration. - :attr list[NormalizationOperation] normalizations: (optional) Defines operations that - can be used to transform the final output JSON into a normalized form. Operations are - executed in the order that they appear in the array. - :attr Source source: (optional) Object containing source parameters for the - configuration. - """ - - def __init__(self, - name, - configuration_id=None, - created=None, - updated=None, - description=None, - conversions=None, - enrichments=None, - normalizations=None, - source=None): - """ - Initialize a Configuration object. - - :param str name: The name of the configuration. - :param str configuration_id: (optional) The unique identifier of the - configuration. - :param datetime created: (optional) The creation date of the configuration in the - format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. - :param datetime updated: (optional) The timestamp of when the configuration was - last updated in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. - :param str description: (optional) The description of the configuration, if - available. - :param Conversions conversions: (optional) Document conversion settings. - :param list[Enrichment] enrichments: (optional) An array of document enrichment - settings for the configuration. - :param list[NormalizationOperation] normalizations: (optional) Defines operations - that can be used to transform the final output JSON into a normalized form. - Operations are executed in the order that they appear in the array. - :param Source source: (optional) Object containing source parameters for the - configuration. 
- """ - self.configuration_id = configuration_id - self.name = name - self.created = created - self.updated = updated - self.description = description - self.conversions = conversions - self.enrichments = enrichments - self.normalizations = normalizations - self.source = source - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Configuration object from a json dictionary.""" - args = {} - if 'configuration_id' in _dict: - args['configuration_id'] = _dict.get('configuration_id') - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in Configuration JSON') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'conversions' in _dict: - args['conversions'] = Conversions._from_dict( - _dict.get('conversions')) - if 'enrichments' in _dict: - args['enrichments'] = [ - Enrichment._from_dict(x) for x in (_dict.get('enrichments')) - ] - if 'normalizations' in _dict: - args['normalizations'] = [ - NormalizationOperation._from_dict(x) - for x in (_dict.get('normalizations')) - ] - if 'source' in _dict: - args['source'] = Source._from_dict(_dict.get('source')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'configuration_id') and self.configuration_id is not None: - _dict['configuration_id'] = self.configuration_id - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'conversions') and self.conversions is not None: - _dict['conversions'] = self.conversions._to_dict() - if hasattr(self, 'enrichments') and self.enrichments is not None: - _dict['enrichments'] = [x._to_dict() for x in self.enrichments] - if hasattr(self, 'normalizations') and self.normalizations is not None: - _dict['normalizations'] = [ - x._to_dict() for x in self.normalizations - ] - if hasattr(self, 'source') and self.source is not None: - _dict['source'] = self.source._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this Configuration object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Conversions(object): - """ - Document conversion settings. - - :attr PdfSettings pdf: (optional) A list of PDF conversion settings. - :attr WordSettings word: (optional) A list of Word conversion settings. - :attr HtmlSettings html: (optional) A list of HTML conversion settings. - :attr SegmentSettings segment: (optional) A list of Document Segmentation settings. - :attr list[NormalizationOperation] json_normalizations: (optional) Defines operations - that can be used to transform the final output JSON into a normalized form. 
Operations - are executed in the order that they appear in the array. - """ - - def __init__(self, - pdf=None, - word=None, - html=None, - segment=None, - json_normalizations=None): - """ - Initialize a Conversions object. - - :param PdfSettings pdf: (optional) A list of PDF conversion settings. - :param WordSettings word: (optional) A list of Word conversion settings. - :param HtmlSettings html: (optional) A list of HTML conversion settings. - :param SegmentSettings segment: (optional) A list of Document Segmentation - settings. - :param list[NormalizationOperation] json_normalizations: (optional) Defines - operations that can be used to transform the final output JSON into a normalized - form. Operations are executed in the order that they appear in the array. - """ - self.pdf = pdf - self.word = word - self.html = html - self.segment = segment - self.json_normalizations = json_normalizations - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Conversions object from a json dictionary.""" - args = {} - if 'pdf' in _dict: - args['pdf'] = PdfSettings._from_dict(_dict.get('pdf')) - if 'word' in _dict: - args['word'] = WordSettings._from_dict(_dict.get('word')) - if 'html' in _dict: - args['html'] = HtmlSettings._from_dict(_dict.get('html')) - if 'segment' in _dict: - args['segment'] = SegmentSettings._from_dict(_dict.get('segment')) - if 'json_normalizations' in _dict: - args['json_normalizations'] = [ - NormalizationOperation._from_dict(x) - for x in (_dict.get('json_normalizations')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'pdf') and self.pdf is not None: - _dict['pdf'] = self.pdf._to_dict() - if hasattr(self, 'word') and self.word is not None: - _dict['word'] = self.word._to_dict() - if hasattr(self, 'html') and self.html is not None: - _dict['html'] = self.html._to_dict() - if hasattr(self, 'segment') and self.segment is not None: - _dict['segment'] = self.segment._to_dict() - if hasattr( - self, - 'json_normalizations') and self.json_normalizations is not None: - _dict['json_normalizations'] = [ - x._to_dict() for x in self.json_normalizations - ] - return _dict - - def __str__(self): - """Return a `str` version of this Conversions object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class CreateEventResponse(object): - """ - An object defining the event being created. - - :attr str type: (optional) The event type that was created. - :attr EventData data: (optional) Query event data object. - """ - - def __init__(self, type=None, data=None): - """ - Initialize a CreateEventResponse object. - - :param str type: (optional) The event type that was created. - :param EventData data: (optional) Query event data object. 
- """ - self.type = type - self.data = data - - @classmethod - def _from_dict(cls, _dict): - """Initialize a CreateEventResponse object from a json dictionary.""" - args = {} - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'data' in _dict: - args['data'] = EventData._from_dict(_dict.get('data')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'type') and self.type is not None: - _dict['type'] = self.type - if hasattr(self, 'data') and self.data is not None: - _dict['data'] = self.data._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this CreateEventResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class CredentialDetails(object): - """ - Object containing details of the stored credentials. - Obtain credentials for your source from the administrator of the source. - - :attr str credential_type: (optional) The authentication method for this credentials - definition. The **credential_type** specified must be supported by the - **source_type**. The following combinations are possible: - - `"source_type": "box"` - valid `credential_type`s: `oauth2` - - `"source_type": "salesforce"` - valid `credential_type`s: `username_password` - - `"source_type": "sharepoint"` - valid `credential_type`s: `saml` with - **source_version** of `online`, or `ntml_v1` with **source_version** of `2016` - - `"source_type": "web_crawl"` - valid `credential_type`s: `noauth` or `basic` - - "source_type": "cloud_object_storage"` - valid `credential_type`s: `aws4_hmac`. - :attr str client_id: (optional) The **client_id** of the source that these credentials - connect to. Only valid, and required, with a **credential_type** of `oauth2`. - :attr str enterprise_id: (optional) The **enterprise_id** of the Box site that these - credentials connect to. Only valid, and required, with a **source_type** of `box`. - :attr str url: (optional) The **url** of the source that these credentials connect to. - Only valid, and required, with a **credential_type** of `username_password`, `noauth`, - and `basic`. - :attr str username: (optional) The **username** of the source that these credentials - connect to. Only valid, and required, with a **credential_type** of `saml`, - `username_password`, `basic`, or `ntml_v1`. - :attr str organization_url: (optional) The **organization_url** of the source that - these credentials connect to. Only valid, and required, with a **credential_type** of - `saml`. - :attr str site_collection_path: (optional) The **site_collection.path** of the source - that these credentials connect to. Only valid, and required, with a **source_type** of - `sharepoint`. - :attr str client_secret: (optional) The **client_secret** of the source that these - credentials connect to. Only valid, and required, with a **credential_type** of - `oauth2`. This value is never returned and is only used when creating or modifying - **credentials**. - :attr str public_key_id: (optional) The **public_key_id** of the source that these - credentials connect to. Only valid, and required, with a **credential_type** of - `oauth2`. 
This value is never returned and is only used when creating or modifying - **credentials**. - :attr str private_key: (optional) The **private_key** of the source that these - credentials connect to. Only valid, and required, with a **credential_type** of - `oauth2`. This value is never returned and is only used when creating or modifying - **credentials**. - :attr str passphrase: (optional) The **passphrase** of the source that these - credentials connect to. Only valid, and required, with a **credential_type** of - `oauth2`. This value is never returned and is only used when creating or modifying - **credentials**. - :attr str password: (optional) The **password** of the source that these credentials - connect to. Only valid, and required, with **credential_type**s of `saml`, - `username_password`, `basic`, or `ntml_v1`. - **Note:** When used with a **source_type** of `salesforce`, the password consists of - the Salesforce password and a valid Salesforce security token concatenated. This value - is never returned and is only used when creating or modifying **credentials**. - :attr str gateway_id: (optional) The ID of the **gateway** to be connected through - (when connecting to intranet sites). Only valid with a **credential_type** of - `noauth`, `basic`, or `ntml_v1`. Gateways are created using the - `/v1/environments/{environment_id}/gateways` methods. - :attr str source_version: (optional) The type of Sharepoint repository to connect to. - Only valid, and required, with a **source_type** of `sharepoint`. - :attr str web_application_url: (optional) SharePoint OnPrem WebApplication URL. Only - valid, and required, with a **source_version** of `2016`. - :attr str domain: (optional) The domain used to log in to your OnPrem SharePoint - account. Only valid, and required, with a **source_version** of `2016`. - :attr str endpoint: (optional) The endpoint associated with the cloud object store - that your are connecting to. Only valid, and required, with a **credential_type** of - `aws4_hmac`. - :attr str access_key_id: (optional) The access key ID associated with the cloud object - store. Only valid, and required, with a **credential_type** of `aws4_hmac`. For more - infomation, see the [cloud object store - documentation](https://cloud.ibm.com/docs/services/cloud-object-storage?topic=cloud-object-storage-using-hmac-credentials#using-hmac-credentials). - :attr str secret_access_key: (optional) The secret access key associated with the - cloud object store. Only valid, and required, with a **credential_type** of - `aws4_hmac`. This value is never returned and is only used when creating or modifying - **credentials**. For more infomation, see the [cloud object store - documentation](https://cloud.ibm.com/docs/services/cloud-object-storage?topic=cloud-object-storage-using-hmac-credentials#using-hmac-credentials). - """ - - def __init__(self, - credential_type=None, - client_id=None, - enterprise_id=None, - url=None, - username=None, - organization_url=None, - site_collection_path=None, - client_secret=None, - public_key_id=None, - private_key=None, - passphrase=None, - password=None, - gateway_id=None, - source_version=None, - web_application_url=None, - domain=None, - endpoint=None, - access_key_id=None, - secret_access_key=None): - """ - Initialize a CredentialDetails object. - - :param str credential_type: (optional) The authentication method for this - credentials definition. The **credential_type** specified must be supported by - the **source_type**. 
The following combinations are possible: - - `"source_type": "box"` - valid `credential_type`s: `oauth2` - - `"source_type": "salesforce"` - valid `credential_type`s: `username_password` - - `"source_type": "sharepoint"` - valid `credential_type`s: `saml` with - **source_version** of `online`, or `ntml_v1` with **source_version** of `2016` - - `"source_type": "web_crawl"` - valid `credential_type`s: `noauth` or `basic` - - "source_type": "cloud_object_storage"` - valid `credential_type`s: `aws4_hmac`. - :param str client_id: (optional) The **client_id** of the source that these - credentials connect to. Only valid, and required, with a **credential_type** of - `oauth2`. - :param str enterprise_id: (optional) The **enterprise_id** of the Box site that - these credentials connect to. Only valid, and required, with a **source_type** of - `box`. - :param str url: (optional) The **url** of the source that these credentials - connect to. Only valid, and required, with a **credential_type** of - `username_password`, `noauth`, and `basic`. - :param str username: (optional) The **username** of the source that these - credentials connect to. Only valid, and required, with a **credential_type** of - `saml`, `username_password`, `basic`, or `ntml_v1`. - :param str organization_url: (optional) The **organization_url** of the source - that these credentials connect to. Only valid, and required, with a - **credential_type** of `saml`. - :param str site_collection_path: (optional) The **site_collection.path** of the - source that these credentials connect to. Only valid, and required, with a - **source_type** of `sharepoint`. - :param str client_secret: (optional) The **client_secret** of the source that - these credentials connect to. Only valid, and required, with a **credential_type** - of `oauth2`. This value is never returned and is only used when creating or - modifying **credentials**. - :param str public_key_id: (optional) The **public_key_id** of the source that - these credentials connect to. Only valid, and required, with a **credential_type** - of `oauth2`. This value is never returned and is only used when creating or - modifying **credentials**. - :param str private_key: (optional) The **private_key** of the source that these - credentials connect to. Only valid, and required, with a **credential_type** of - `oauth2`. This value is never returned and is only used when creating or modifying - **credentials**. - :param str passphrase: (optional) The **passphrase** of the source that these - credentials connect to. Only valid, and required, with a **credential_type** of - `oauth2`. This value is never returned and is only used when creating or modifying - **credentials**. - :param str password: (optional) The **password** of the source that these - credentials connect to. Only valid, and required, with **credential_type**s of - `saml`, `username_password`, `basic`, or `ntml_v1`. - **Note:** When used with a **source_type** of `salesforce`, the password consists - of the Salesforce password and a valid Salesforce security token concatenated. - This value is never returned and is only used when creating or modifying - **credentials**. - :param str gateway_id: (optional) The ID of the **gateway** to be connected - through (when connecting to intranet sites). Only valid with a **credential_type** - of `noauth`, `basic`, or `ntml_v1`. Gateways are created using the - `/v1/environments/{environment_id}/gateways` methods. 
- :param str source_version: (optional) The type of Sharepoint repository to connect - to. Only valid, and required, with a **source_type** of `sharepoint`. - :param str web_application_url: (optional) SharePoint OnPrem WebApplication URL. - Only valid, and required, with a **source_version** of `2016`. - :param str domain: (optional) The domain used to log in to your OnPrem SharePoint - account. Only valid, and required, with a **source_version** of `2016`. - :param str endpoint: (optional) The endpoint associated with the cloud object - store that your are connecting to. Only valid, and required, with a - **credential_type** of `aws4_hmac`. - :param str access_key_id: (optional) The access key ID associated with the cloud - object store. Only valid, and required, with a **credential_type** of `aws4_hmac`. - For more infomation, see the [cloud object store - documentation](https://cloud.ibm.com/docs/services/cloud-object-storage?topic=cloud-object-storage-using-hmac-credentials#using-hmac-credentials). - :param str secret_access_key: (optional) The secret access key associated with the - cloud object store. Only valid, and required, with a **credential_type** of - `aws4_hmac`. This value is never returned and is only used when creating or - modifying **credentials**. For more infomation, see the [cloud object store - documentation](https://cloud.ibm.com/docs/services/cloud-object-storage?topic=cloud-object-storage-using-hmac-credentials#using-hmac-credentials). - """ - self.credential_type = credential_type - self.client_id = client_id - self.enterprise_id = enterprise_id - self.url = url - self.username = username - self.organization_url = organization_url - self.site_collection_path = site_collection_path - self.client_secret = client_secret - self.public_key_id = public_key_id - self.private_key = private_key - self.passphrase = passphrase - self.password = password - self.gateway_id = gateway_id - self.source_version = source_version - self.web_application_url = web_application_url - self.domain = domain - self.endpoint = endpoint - self.access_key_id = access_key_id - self.secret_access_key = secret_access_key - - @classmethod - def _from_dict(cls, _dict): - """Initialize a CredentialDetails object from a json dictionary.""" - args = {} - if 'credential_type' in _dict: - args['credential_type'] = _dict.get('credential_type') - if 'client_id' in _dict: - args['client_id'] = _dict.get('client_id') - if 'enterprise_id' in _dict: - args['enterprise_id'] = _dict.get('enterprise_id') - if 'url' in _dict: - args['url'] = _dict.get('url') - if 'username' in _dict: - args['username'] = _dict.get('username') - if 'organization_url' in _dict: - args['organization_url'] = _dict.get('organization_url') - if 'site_collection.path' in _dict: - args['site_collection_path'] = _dict.get('site_collection.path') - if 'client_secret' in _dict: - args['client_secret'] = _dict.get('client_secret') - if 'public_key_id' in _dict: - args['public_key_id'] = _dict.get('public_key_id') - if 'private_key' in _dict: - args['private_key'] = _dict.get('private_key') - if 'passphrase' in _dict: - args['passphrase'] = _dict.get('passphrase') - if 'password' in _dict: - args['password'] = _dict.get('password') - if 'gateway_id' in _dict: - args['gateway_id'] = _dict.get('gateway_id') - if 'source_version' in _dict: - args['source_version'] = _dict.get('source_version') - if 'web_application_url' in _dict: - args['web_application_url'] = _dict.get('web_application_url') - if 'domain' in _dict: - args['domain'] = 
_dict.get('domain') - if 'endpoint' in _dict: - args['endpoint'] = _dict.get('endpoint') - if 'access_key_id' in _dict: - args['access_key_id'] = _dict.get('access_key_id') - if 'secret_access_key' in _dict: - args['secret_access_key'] = _dict.get('secret_access_key') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'credential_type') and self.credential_type is not None: - _dict['credential_type'] = self.credential_type - if hasattr(self, 'client_id') and self.client_id is not None: - _dict['client_id'] = self.client_id - if hasattr(self, 'enterprise_id') and self.enterprise_id is not None: - _dict['enterprise_id'] = self.enterprise_id - if hasattr(self, 'url') and self.url is not None: - _dict['url'] = self.url - if hasattr(self, 'username') and self.username is not None: - _dict['username'] = self.username - if hasattr(self, - 'organization_url') and self.organization_url is not None: - _dict['organization_url'] = self.organization_url - if hasattr(self, 'site_collection_path' - ) and self.site_collection_path is not None: - _dict['site_collection.path'] = self.site_collection_path - if hasattr(self, 'client_secret') and self.client_secret is not None: - _dict['client_secret'] = self.client_secret - if hasattr(self, 'public_key_id') and self.public_key_id is not None: - _dict['public_key_id'] = self.public_key_id - if hasattr(self, 'private_key') and self.private_key is not None: - _dict['private_key'] = self.private_key - if hasattr(self, 'passphrase') and self.passphrase is not None: - _dict['passphrase'] = self.passphrase - if hasattr(self, 'password') and self.password is not None: - _dict['password'] = self.password - if hasattr(self, 'gateway_id') and self.gateway_id is not None: - _dict['gateway_id'] = self.gateway_id - if hasattr(self, 'source_version') and self.source_version is not None: - _dict['source_version'] = self.source_version - if hasattr( - self, - 'web_application_url') and self.web_application_url is not None: - _dict['web_application_url'] = self.web_application_url - if hasattr(self, 'domain') and self.domain is not None: - _dict['domain'] = self.domain - if hasattr(self, 'endpoint') and self.endpoint is not None: - _dict['endpoint'] = self.endpoint - if hasattr(self, 'access_key_id') and self.access_key_id is not None: - _dict['access_key_id'] = self.access_key_id - if hasattr(self, - 'secret_access_key') and self.secret_access_key is not None: - _dict['secret_access_key'] = self.secret_access_key - return _dict - - def __str__(self): - """Return a `str` version of this CredentialDetails object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Credentials(object): - """ - Object containing credential information. - - :attr str credential_id: (optional) Unique identifier for this set of credentials. - :attr str source_type: (optional) The source that this credentials object connects to. - - `box` indicates the credentials are used to connect an instance of Enterprise Box. - - `salesforce` indicates the credentials are used to connect to Salesforce. 
- - `sharepoint` indicates the credentials are used to connect to Microsoft SharePoint - Online. - - `web_crawl` indicates the credentials are used to perform a web crawl. - = `cloud_object_storage` indicates the credentials are used to connect to an IBM - Cloud Object Store. - :attr CredentialDetails credential_details: (optional) Object containing details of - the stored credentials. - Obtain credentials for your source from the administrator of the source. - """ - - def __init__(self, - credential_id=None, - source_type=None, - credential_details=None): - """ - Initialize a Credentials object. - - :param str credential_id: (optional) Unique identifier for this set of - credentials. - :param str source_type: (optional) The source that this credentials object - connects to. - - `box` indicates the credentials are used to connect an instance of Enterprise - Box. - - `salesforce` indicates the credentials are used to connect to Salesforce. - - `sharepoint` indicates the credentials are used to connect to Microsoft - SharePoint Online. - - `web_crawl` indicates the credentials are used to perform a web crawl. - = `cloud_object_storage` indicates the credentials are used to connect to an IBM - Cloud Object Store. - :param CredentialDetails credential_details: (optional) Object containing details - of the stored credentials. - Obtain credentials for your source from the administrator of the source. - """ - self.credential_id = credential_id - self.source_type = source_type - self.credential_details = credential_details - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Credentials object from a json dictionary.""" - args = {} - if 'credential_id' in _dict: - args['credential_id'] = _dict.get('credential_id') - if 'source_type' in _dict: - args['source_type'] = _dict.get('source_type') - if 'credential_details' in _dict: - args['credential_details'] = CredentialDetails._from_dict( - _dict.get('credential_details')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'credential_id') and self.credential_id is not None: - _dict['credential_id'] = self.credential_id - if hasattr(self, 'source_type') and self.source_type is not None: - _dict['source_type'] = self.source_type - if hasattr( - self, - 'credential_details') and self.credential_details is not None: - _dict['credential_details'] = self.credential_details._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this Credentials object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class CredentialsList(object): - """ - CredentialsList. - - :attr list[Credentials] credentials: (optional) An array of credential definitions - that were created for this instance. - """ - - def __init__(self, credentials=None): - """ - Initialize a CredentialsList object. - - :param list[Credentials] credentials: (optional) An array of credential - definitions that were created for this instance. 
- """ - self.credentials = credentials - - @classmethod - def _from_dict(cls, _dict): - """Initialize a CredentialsList object from a json dictionary.""" - args = {} - if 'credentials' in _dict: - args['credentials'] = [ - Credentials._from_dict(x) for x in (_dict.get('credentials')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'credentials') and self.credentials is not None: - _dict['credentials'] = [x._to_dict() for x in self.credentials] - return _dict - - def __str__(self): - """Return a `str` version of this CredentialsList object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DeleteCollectionResponse(object): - """ - DeleteCollectionResponse. - - :attr str collection_id: The unique identifier of the collection that is being - deleted. - :attr str status: The status of the collection. The status of a successful deletion - operation is `deleted`. - """ - - def __init__(self, collection_id, status): - """ - Initialize a DeleteCollectionResponse object. - - :param str collection_id: The unique identifier of the collection that is being - deleted. - :param str status: The status of the collection. The status of a successful - deletion operation is `deleted`. - """ - self.collection_id = collection_id - self.status = status - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DeleteCollectionResponse object from a json dictionary.""" - args = {} - if 'collection_id' in _dict: - args['collection_id'] = _dict.get('collection_id') - else: - raise ValueError( - 'Required property \'collection_id\' not present in DeleteCollectionResponse JSON' - ) - if 'status' in _dict: - args['status'] = _dict.get('status') - else: - raise ValueError( - 'Required property \'status\' not present in DeleteCollectionResponse JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'collection_id') and self.collection_id is not None: - _dict['collection_id'] = self.collection_id - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - return _dict - - def __str__(self): - """Return a `str` version of this DeleteCollectionResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DeleteConfigurationResponse(object): - """ - DeleteConfigurationResponse. - - :attr str configuration_id: The unique identifier for the configuration. - :attr str status: Status of the configuration. A deleted configuration has the status - deleted. - :attr list[Notice] notices: (optional) An array of notice messages, if any. - """ - - def __init__(self, configuration_id, status, notices=None): - """ - Initialize a DeleteConfigurationResponse object. - - :param str configuration_id: The unique identifier for the configuration. 
- :param str status: Status of the configuration. A deleted configuration has the - status deleted. - :param list[Notice] notices: (optional) An array of notice messages, if any. - """ - self.configuration_id = configuration_id - self.status = status - self.notices = notices - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DeleteConfigurationResponse object from a json dictionary.""" - args = {} - if 'configuration_id' in _dict: - args['configuration_id'] = _dict.get('configuration_id') - else: - raise ValueError( - 'Required property \'configuration_id\' not present in DeleteConfigurationResponse JSON' - ) - if 'status' in _dict: - args['status'] = _dict.get('status') - else: - raise ValueError( - 'Required property \'status\' not present in DeleteConfigurationResponse JSON' - ) - if 'notices' in _dict: - args['notices'] = [ - Notice._from_dict(x) for x in (_dict.get('notices')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'configuration_id') and self.configuration_id is not None: - _dict['configuration_id'] = self.configuration_id - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'notices') and self.notices is not None: - _dict['notices'] = [x._to_dict() for x in self.notices] - return _dict - - def __str__(self): - """Return a `str` version of this DeleteConfigurationResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DeleteCredentials(object): - """ - Object returned after credentials are deleted. - - :attr str credential_id: (optional) The unique identifier of the credentials that have - been deleted. - :attr str status: (optional) The status of the deletion request. - """ - - def __init__(self, credential_id=None, status=None): - """ - Initialize a DeleteCredentials object. - - :param str credential_id: (optional) The unique identifier of the credentials that - have been deleted. - :param str status: (optional) The status of the deletion request. 
- """ - self.credential_id = credential_id - self.status = status - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DeleteCredentials object from a json dictionary.""" - args = {} - if 'credential_id' in _dict: - args['credential_id'] = _dict.get('credential_id') - if 'status' in _dict: - args['status'] = _dict.get('status') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'credential_id') and self.credential_id is not None: - _dict['credential_id'] = self.credential_id - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - return _dict - - def __str__(self): - """Return a `str` version of this DeleteCredentials object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DeleteDocumentResponse(object): - """ - DeleteDocumentResponse. - - :attr str document_id: (optional) The unique identifier of the document. - :attr str status: (optional) Status of the document. A deleted document has the status - deleted. - """ - - def __init__(self, document_id=None, status=None): - """ - Initialize a DeleteDocumentResponse object. - - :param str document_id: (optional) The unique identifier of the document. - :param str status: (optional) Status of the document. A deleted document has the - status deleted. - """ - self.document_id = document_id - self.status = status - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DeleteDocumentResponse object from a json dictionary.""" - args = {} - if 'document_id' in _dict: - args['document_id'] = _dict.get('document_id') - if 'status' in _dict: - args['status'] = _dict.get('status') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document_id') and self.document_id is not None: - _dict['document_id'] = self.document_id - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - return _dict - - def __str__(self): - """Return a `str` version of this DeleteDocumentResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DeleteEnvironmentResponse(object): - """ - DeleteEnvironmentResponse. - - :attr str environment_id: The unique identifier for the environment. - :attr str status: Status of the environment. - """ - - def __init__(self, environment_id, status): - """ - Initialize a DeleteEnvironmentResponse object. - - :param str environment_id: The unique identifier for the environment. - :param str status: Status of the environment. 
- """ - self.environment_id = environment_id - self.status = status - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DeleteEnvironmentResponse object from a json dictionary.""" - args = {} - if 'environment_id' in _dict: - args['environment_id'] = _dict.get('environment_id') - else: - raise ValueError( - 'Required property \'environment_id\' not present in DeleteEnvironmentResponse JSON' - ) - if 'status' in _dict: - args['status'] = _dict.get('status') - else: - raise ValueError( - 'Required property \'status\' not present in DeleteEnvironmentResponse JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'environment_id') and self.environment_id is not None: - _dict['environment_id'] = self.environment_id - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - return _dict - - def __str__(self): - """Return a `str` version of this DeleteEnvironmentResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DiskUsage(object): - """ - Summary of the disk usage statistics for the environment. - - :attr int used_bytes: (optional) Number of bytes within the environment's disk - capacity that are currently used to store data. - :attr int maximum_allowed_bytes: (optional) Total number of bytes available in the - environment's disk capacity. - """ - - def __init__(self, used_bytes=None, maximum_allowed_bytes=None): - """ - Initialize a DiskUsage object. - - :param int used_bytes: (optional) Number of bytes within the environment's disk - capacity that are currently used to store data. - :param int maximum_allowed_bytes: (optional) Total number of bytes available in - the environment's disk capacity. - """ - self.used_bytes = used_bytes - self.maximum_allowed_bytes = maximum_allowed_bytes - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DiskUsage object from a json dictionary.""" - args = {} - if 'used_bytes' in _dict: - args['used_bytes'] = _dict.get('used_bytes') - if 'maximum_allowed_bytes' in _dict: - args['maximum_allowed_bytes'] = _dict.get('maximum_allowed_bytes') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'used_bytes') and self.used_bytes is not None: - _dict['used_bytes'] = self.used_bytes - if hasattr(self, 'maximum_allowed_bytes' - ) and self.maximum_allowed_bytes is not None: - _dict['maximum_allowed_bytes'] = self.maximum_allowed_bytes - return _dict - - def __str__(self): - """Return a `str` version of this DiskUsage object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DocumentAccepted(object): - """ - DocumentAccepted. - - :attr str document_id: (optional) The unique identifier of the ingested document. 
- :attr str status: (optional) Status of the document in the ingestion process. A status - of `processing` is returned for documents that are ingested with a *version* date - before `2019-01-01`. The `pending` status is returned for all others. - :attr list[Notice] notices: (optional) Array of notices produced by the - document-ingestion process. - """ - - def __init__(self, document_id=None, status=None, notices=None): - """ - Initialize a DocumentAccepted object. - - :param str document_id: (optional) The unique identifier of the ingested document. - :param str status: (optional) Status of the document in the ingestion process. A - status of `processing` is returned for documents that are ingested with a - *version* date before `2019-01-01`. The `pending` status is returned for all - others. - :param list[Notice] notices: (optional) Array of notices produced by the - document-ingestion process. - """ - self.document_id = document_id - self.status = status - self.notices = notices - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DocumentAccepted object from a json dictionary.""" - args = {} - if 'document_id' in _dict: - args['document_id'] = _dict.get('document_id') - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'notices' in _dict: - args['notices'] = [ - Notice._from_dict(x) for x in (_dict.get('notices')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document_id') and self.document_id is not None: - _dict['document_id'] = self.document_id - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'notices') and self.notices is not None: - _dict['notices'] = [x._to_dict() for x in self.notices] - return _dict - - def __str__(self): - """Return a `str` version of this DocumentAccepted object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DocumentCounts(object): - """ - DocumentCounts. - - :attr int available: (optional) The total number of available documents in the - collection. - :attr int processing: (optional) The number of documents in the collection that are - currently being processed. - :attr int failed: (optional) The number of documents in the collection that failed to - be ingested. - :attr int pending: (optional) The number of documents that have been uploaded to the - collection, but have not yet started processing. - """ - - def __init__(self, - available=None, - processing=None, - failed=None, - pending=None): - """ - Initialize a DocumentCounts object. - - :param int available: (optional) The total number of available documents in the - collection. - :param int processing: (optional) The number of documents in the collection that - are currently being processed. - :param int failed: (optional) The number of documents in the collection that - failed to be ingested. - :param int pending: (optional) The number of documents that have been uploaded to - the collection, but have not yet started processing. 
- """ - self.available = available - self.processing = processing - self.failed = failed - self.pending = pending - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DocumentCounts object from a json dictionary.""" - args = {} - if 'available' in _dict: - args['available'] = _dict.get('available') - if 'processing' in _dict: - args['processing'] = _dict.get('processing') - if 'failed' in _dict: - args['failed'] = _dict.get('failed') - if 'pending' in _dict: - args['pending'] = _dict.get('pending') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'available') and self.available is not None: - _dict['available'] = self.available - if hasattr(self, 'processing') and self.processing is not None: - _dict['processing'] = self.processing - if hasattr(self, 'failed') and self.failed is not None: - _dict['failed'] = self.failed - if hasattr(self, 'pending') and self.pending is not None: - _dict['pending'] = self.pending - return _dict - - def __str__(self): - """Return a `str` version of this DocumentCounts object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DocumentSnapshot(object): - """ - DocumentSnapshot. - - :attr str step: (optional) The step in the document conversion process that the - snapshot object represents. - :attr dict snapshot: (optional) Snapshot of the conversion. - """ - - def __init__(self, step=None, snapshot=None): - """ - Initialize a DocumentSnapshot object. - - :param str step: (optional) The step in the document conversion process that the - snapshot object represents. - :param dict snapshot: (optional) Snapshot of the conversion. - """ - self.step = step - self.snapshot = snapshot - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DocumentSnapshot object from a json dictionary.""" - args = {} - if 'step' in _dict: - args['step'] = _dict.get('step') - if 'snapshot' in _dict: - args['snapshot'] = _dict.get('snapshot') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'step') and self.step is not None: - _dict['step'] = self.step - if hasattr(self, 'snapshot') and self.snapshot is not None: - _dict['snapshot'] = self.snapshot - return _dict - - def __str__(self): - """Return a `str` version of this DocumentSnapshot object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DocumentStatus(object): - """ - Status information about a submitted document. - - :attr str document_id: The unique identifier of the document. - :attr str configuration_id: (optional) The unique identifier for the configuration. - :attr str status: Status of the document in the ingestion process. - :attr str status_description: Description of the document status. 
- :attr str filename: (optional) Name of the original source file (if available). - :attr str file_type: (optional) The type of the original source file. - :attr str sha1: (optional) The SHA-1 hash of the original source file (formatted as a - hexadecimal string). - :attr list[Notice] notices: Array of notices produced by the document-ingestion - process. - """ - - def __init__(self, - document_id, - status, - status_description, - notices, - configuration_id=None, - filename=None, - file_type=None, - sha1=None): - """ - Initialize a DocumentStatus object. - - :param str document_id: The unique identifier of the document. - :param str status: Status of the document in the ingestion process. - :param str status_description: Description of the document status. - :param list[Notice] notices: Array of notices produced by the document-ingestion - process. - :param str configuration_id: (optional) The unique identifier for the - configuration. - :param str filename: (optional) Name of the original source file (if available). - :param str file_type: (optional) The type of the original source file. - :param str sha1: (optional) The SHA-1 hash of the original source file (formatted - as a hexadecimal string). - """ - self.document_id = document_id - self.configuration_id = configuration_id - self.status = status - self.status_description = status_description - self.filename = filename - self.file_type = file_type - self.sha1 = sha1 - self.notices = notices - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DocumentStatus object from a json dictionary.""" - args = {} - if 'document_id' in _dict: - args['document_id'] = _dict.get('document_id') - else: - raise ValueError( - 'Required property \'document_id\' not present in DocumentStatus JSON' - ) - if 'configuration_id' in _dict: - args['configuration_id'] = _dict.get('configuration_id') - if 'status' in _dict: - args['status'] = _dict.get('status') - else: - raise ValueError( - 'Required property \'status\' not present in DocumentStatus JSON' - ) - if 'status_description' in _dict: - args['status_description'] = _dict.get('status_description') - else: - raise ValueError( - 'Required property \'status_description\' not present in DocumentStatus JSON' - ) - if 'filename' in _dict: - args['filename'] = _dict.get('filename') - if 'file_type' in _dict: - args['file_type'] = _dict.get('file_type') - if 'sha1' in _dict: - args['sha1'] = _dict.get('sha1') - if 'notices' in _dict: - args['notices'] = [ - Notice._from_dict(x) for x in (_dict.get('notices')) - ] - else: - raise ValueError( - 'Required property \'notices\' not present in DocumentStatus JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document_id') and self.document_id is not None: - _dict['document_id'] = self.document_id - if hasattr(self, - 'configuration_id') and self.configuration_id is not None: - _dict['configuration_id'] = self.configuration_id - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr( - self, - 'status_description') and self.status_description is not None: - _dict['status_description'] = self.status_description - if hasattr(self, 'filename') and self.filename is not None: - _dict['filename'] = self.filename - if hasattr(self, 'file_type') and self.file_type is not None: - _dict['file_type'] = self.file_type - if hasattr(self, 'sha1') and self.sha1 is not None: - _dict['sha1'] = self.sha1 - if hasattr(self, 
'notices') and self.notices is not None: - _dict['notices'] = [x._to_dict() for x in self.notices] - return _dict - - def __str__(self): - """Return a `str` version of this DocumentStatus object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Enrichment(object): - """ - Enrichment. - - :attr str description: (optional) Describes what the enrichment step does. - :attr str destination_field: Field where enrichments will be stored. This field must - already exist or be at most 1 level deeper than an existing field. For example, if - `text` is a top-level field with no sub-fields, `text.foo` is a valid destination but - `text.foo.bar` is not. - :attr str source_field: Field to be enriched. - Arrays can be specified as the **source_field** if the **enrichment** service for this - enrichment is set to `natural_language_undstanding`. - :attr bool overwrite: (optional) Indicates that the enrichments will overwrite the - destination_field field if it already exists. - :attr str enrichment_name: Name of the enrichment service to call. Current options are - `natural_language_understanding` and `elements`. - When using `natual_language_understanding`, the **options** object must contain - Natural Language Understanding options. - When using `elements` the **options** object must contain Element Classification - options. Additionally, when using the `elements` enrichment the configuration - specified and files ingested must meet all the criteria specified in [the - documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-element-classification#element-classification). - :attr bool ignore_downstream_errors: (optional) If true, then most errors generated - during the enrichment process will be treated as warnings and will not cause the - document to fail processing. - :attr EnrichmentOptions options: (optional) An object representing the configuration - options to use for the `elements` enrichment. - """ - - def __init__(self, - destination_field, - source_field, - enrichment_name, - description=None, - overwrite=None, - ignore_downstream_errors=None, - options=None): - """ - Initialize a Enrichment object. - - :param str destination_field: Field where enrichments will be stored. This field - must already exist or be at most 1 level deeper than an existing field. For - example, if `text` is a top-level field with no sub-fields, `text.foo` is a valid - destination but `text.foo.bar` is not. - :param str source_field: Field to be enriched. - Arrays can be specified as the **source_field** if the **enrichment** service for - this enrichment is set to `natural_language_undstanding`. - :param str enrichment_name: Name of the enrichment service to call. Current - options are `natural_language_understanding` and `elements`. - When using `natual_language_understanding`, the **options** object must contain - Natural Language Understanding options. - When using `elements` the **options** object must contain Element Classification - options. 
Additionally, when using the `elements` enrichment the configuration - specified and files ingested must meet all the criteria specified in [the - documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-element-classification#element-classification). - :param str description: (optional) Describes what the enrichment step does. - :param bool overwrite: (optional) Indicates that the enrichments will overwrite - the destination_field field if it already exists. - :param bool ignore_downstream_errors: (optional) If true, then most errors - generated during the enrichment process will be treated as warnings and will not - cause the document to fail processing. - :param EnrichmentOptions options: (optional) An object representing the - configuration options to use for the `elements` enrichment. - """ - self.description = description - self.destination_field = destination_field - self.source_field = source_field - self.overwrite = overwrite - self.enrichment_name = enrichment_name - self.ignore_downstream_errors = ignore_downstream_errors - self.options = options - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Enrichment object from a json dictionary.""" - args = {} - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'destination_field' in _dict: - args['destination_field'] = _dict.get('destination_field') - else: - raise ValueError( - 'Required property \'destination_field\' not present in Enrichment JSON' - ) - if 'source_field' in _dict: - args['source_field'] = _dict.get('source_field') - else: - raise ValueError( - 'Required property \'source_field\' not present in Enrichment JSON' - ) - if 'overwrite' in _dict: - args['overwrite'] = _dict.get('overwrite') - if 'enrichment' in _dict or 'enrichment_name' in _dict: - args['enrichment_name'] = _dict.get('enrichment') or _dict.get( - 'enrichment_name') - else: - raise ValueError( - 'Required property \'enrichment\' not present in Enrichment JSON' - ) - if 'ignore_downstream_errors' in _dict: - args['ignore_downstream_errors'] = _dict.get( - 'ignore_downstream_errors') - if 'options' in _dict: - args['options'] = EnrichmentOptions._from_dict(_dict.get('options')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, - 'destination_field') and self.destination_field is not None: - _dict['destination_field'] = self.destination_field - if hasattr(self, 'source_field') and self.source_field is not None: - _dict['source_field'] = self.source_field - if hasattr(self, 'overwrite') and self.overwrite is not None: - _dict['overwrite'] = self.overwrite - if hasattr(self, - 'enrichment_name') and self.enrichment_name is not None: - _dict['enrichment'] = self.enrichment_name - if hasattr(self, 'ignore_downstream_errors' - ) and self.ignore_downstream_errors is not None: - _dict['ignore_downstream_errors'] = self.ignore_downstream_errors - if hasattr(self, 'options') and self.options is not None: - _dict['options'] = self.options._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this Enrichment object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): 
- """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class EnrichmentOptions(object): - """ - An object representing the configuration options to use for the `elements` enrichment. - - :attr NluEnrichmentFeatures features: (optional) - :attr str language: (optional) ISO 639-1 code indicating the language to use for the - analysis. This code overrides the automatic language detection performed by the - service. Valid codes are `ar` (Arabic), `en` (English), `fr` (French), `de` (German), - `it` (Italian), `pt` (Portuguese), `ru` (Russian), `es` (Spanish), and `sv` (Swedish). - **Note:** Not all features support all languages, automatic detection is recommended. - :attr str model: (optional) *For use with `elements` enrichments only.* The element - extraction model to use. Models available are: `contract`. - """ - - def __init__(self, features=None, language=None, model=None): - """ - Initialize a EnrichmentOptions object. - - :param NluEnrichmentFeatures features: (optional) - :param str language: (optional) ISO 639-1 code indicating the language to use for - the analysis. This code overrides the automatic language detection performed by - the service. Valid codes are `ar` (Arabic), `en` (English), `fr` (French), `de` - (German), `it` (Italian), `pt` (Portuguese), `ru` (Russian), `es` (Spanish), and - `sv` (Swedish). **Note:** Not all features support all languages, automatic - detection is recommended. - :param str model: (optional) *For use with `elements` enrichments only.* The - element extraction model to use. Models available are: `contract`. - """ - self.features = features - self.language = language - self.model = model - - @classmethod - def _from_dict(cls, _dict): - """Initialize a EnrichmentOptions object from a json dictionary.""" - args = {} - if 'features' in _dict: - args['features'] = NluEnrichmentFeatures._from_dict( - _dict.get('features')) - if 'language' in _dict: - args['language'] = _dict.get('language') - if 'model' in _dict: - args['model'] = _dict.get('model') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'features') and self.features is not None: - _dict['features'] = self.features._to_dict() - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language - if hasattr(self, 'model') and self.model is not None: - _dict['model'] = self.model - return _dict - - def __str__(self): - """Return a `str` version of this EnrichmentOptions object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Environment(object): - """ - Details about an environment. - - :attr str environment_id: (optional) Unique identifier for the environment. - :attr str name: (optional) Name that identifies the environment. - :attr str description: (optional) Description of the environment. - :attr datetime created: (optional) Creation date of the environment, in the format - `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. - :attr datetime updated: (optional) Date of most recent environment update, in the - format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. - :attr str status: (optional) Current status of the environment. 
`resizing` is - displayed when a request to increase the environment size has been made, but is still - in the process of being completed. - :attr bool read_only: (optional) If `true`, the environment contains read-only - collections that are maintained by IBM. - :attr str size: (optional) Current size of the environment. - :attr str requested_size: (optional) The new size requested for this environment. Only - returned when the environment *status* is `resizing`. - *Note:* Querying and indexing can still be performed during an environment upsize. - :attr IndexCapacity index_capacity: (optional) Details about the resource usage and - capacity of the environment. - :attr SearchStatus search_status: (optional) Information about the Continuous - Relevancy Training for this environment. - """ - - def __init__(self, - environment_id=None, - name=None, - description=None, - created=None, - updated=None, - status=None, - read_only=None, - size=None, - requested_size=None, - index_capacity=None, - search_status=None): - """ - Initialize a Environment object. - - :param str environment_id: (optional) Unique identifier for the environment. - :param str name: (optional) Name that identifies the environment. - :param str description: (optional) Description of the environment. - :param datetime created: (optional) Creation date of the environment, in the - format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. - :param datetime updated: (optional) Date of most recent environment update, in the - format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. - :param str status: (optional) Current status of the environment. `resizing` is - displayed when a request to increase the environment size has been made, but is - still in the process of being completed. - :param bool read_only: (optional) If `true`, the environment contains read-only - collections that are maintained by IBM. - :param str size: (optional) Current size of the environment. - :param str requested_size: (optional) The new size requested for this environment. - Only returned when the environment *status* is `resizing`. - *Note:* Querying and indexing can still be performed during an environment upsize. - :param IndexCapacity index_capacity: (optional) Details about the resource usage - and capacity of the environment. - :param SearchStatus search_status: (optional) Information about the Continuous - Relevancy Training for this environment. 
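# Illustrative sketch: reading the resize-related fields described in the
# Environment docstring above from a hypothetical "get environment" response.
environment = {
    "environment_id": "env-example",
    "name": "my-environment",
    "status": "resizing",        # shown while a size increase is in progress
    "size": "S",
    "requested_size": "M",       # only returned while status is "resizing"
    "read_only": False,
}

if environment.get("status") == "resizing":
    print("Resizing from %s to %s; querying and indexing still work." %
          (environment.get("size"), environment.get("requested_size")))
else:
    print("Environment size: %s" % environment.get("size"))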
- """ - self.environment_id = environment_id - self.name = name - self.description = description - self.created = created - self.updated = updated - self.status = status - self.read_only = read_only - self.size = size - self.requested_size = requested_size - self.index_capacity = index_capacity - self.search_status = search_status - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Environment object from a json dictionary.""" - args = {} - if 'environment_id' in _dict: - args['environment_id'] = _dict.get('environment_id') - if 'name' in _dict: - args['name'] = _dict.get('name') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'read_only' in _dict: - args['read_only'] = _dict.get('read_only') - if 'size' in _dict: - args['size'] = _dict.get('size') - if 'requested_size' in _dict: - args['requested_size'] = _dict.get('requested_size') - if 'index_capacity' in _dict: - args['index_capacity'] = IndexCapacity._from_dict( - _dict.get('index_capacity')) - if 'search_status' in _dict: - args['search_status'] = SearchStatus._from_dict( - _dict.get('search_status')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'environment_id') and self.environment_id is not None: - _dict['environment_id'] = self.environment_id - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'read_only') and self.read_only is not None: - _dict['read_only'] = self.read_only - if hasattr(self, 'size') and self.size is not None: - _dict['size'] = self.size - if hasattr(self, 'requested_size') and self.requested_size is not None: - _dict['requested_size'] = self.requested_size - if hasattr(self, 'index_capacity') and self.index_capacity is not None: - _dict['index_capacity'] = self.index_capacity._to_dict() - if hasattr(self, 'search_status') and self.search_status is not None: - _dict['search_status'] = self.search_status._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this Environment object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class EnvironmentDocuments(object): - """ - Summary of the document usage statistics for the environment. - - :attr int indexed: (optional) Number of documents indexed for the environment. - :attr int maximum_allowed: (optional) Total number of documents allowed in the - environment's capacity. 
- """ - - def __init__(self, indexed=None, maximum_allowed=None): - """ - Initialize a EnvironmentDocuments object. - - :param int indexed: (optional) Number of documents indexed for the environment. - :param int maximum_allowed: (optional) Total number of documents allowed in the - environment's capacity. - """ - self.indexed = indexed - self.maximum_allowed = maximum_allowed - - @classmethod - def _from_dict(cls, _dict): - """Initialize a EnvironmentDocuments object from a json dictionary.""" - args = {} - if 'indexed' in _dict: - args['indexed'] = _dict.get('indexed') - if 'maximum_allowed' in _dict: - args['maximum_allowed'] = _dict.get('maximum_allowed') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'indexed') and self.indexed is not None: - _dict['indexed'] = self.indexed - if hasattr(self, - 'maximum_allowed') and self.maximum_allowed is not None: - _dict['maximum_allowed'] = self.maximum_allowed - return _dict - - def __str__(self): - """Return a `str` version of this EnvironmentDocuments object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class EventData(object): - """ - Query event data object. - - :attr str environment_id: The **environment_id** associated with the query that the - event is associated with. - :attr str session_token: The session token that was returned as part of the query - results that this event is associated with. - :attr datetime client_timestamp: (optional) The optional timestamp for the event that - was created. If not provided, the time that the event was created in the log was used. - :attr int display_rank: (optional) The rank of the result item which the event is - associated with. - :attr str collection_id: The **collection_id** of the document that this event is - associated with. - :attr str document_id: The **document_id** of the document that this event is - associated with. - :attr str query_id: (optional) The query identifier stored in the log. The query and - any events associated with that query are stored with the same **query_id**. - """ - - def __init__(self, - environment_id, - session_token, - collection_id, - document_id, - client_timestamp=None, - display_rank=None, - query_id=None): - """ - Initialize a EventData object. - - :param str environment_id: The **environment_id** associated with the query that - the event is associated with. - :param str session_token: The session token that was returned as part of the query - results that this event is associated with. - :param str collection_id: The **collection_id** of the document that this event is - associated with. - :param str document_id: The **document_id** of the document that this event is - associated with. - :param datetime client_timestamp: (optional) The optional timestamp for the event - that was created. If not provided, the time that the event was created in the log - was used. - :param int display_rank: (optional) The rank of the result item which the event is - associated with. - :param str query_id: (optional) The query identifier stored in the log. The query - and any events associated with that query are stored with the same **query_id**. 
- """ - self.environment_id = environment_id - self.session_token = session_token - self.client_timestamp = client_timestamp - self.display_rank = display_rank - self.collection_id = collection_id - self.document_id = document_id - self.query_id = query_id - - @classmethod - def _from_dict(cls, _dict): - """Initialize a EventData object from a json dictionary.""" - args = {} - if 'environment_id' in _dict: - args['environment_id'] = _dict.get('environment_id') - else: - raise ValueError( - 'Required property \'environment_id\' not present in EventData JSON' - ) - if 'session_token' in _dict: - args['session_token'] = _dict.get('session_token') - else: - raise ValueError( - 'Required property \'session_token\' not present in EventData JSON' - ) - if 'client_timestamp' in _dict: - args['client_timestamp'] = string_to_datetime( - _dict.get('client_timestamp')) - if 'display_rank' in _dict: - args['display_rank'] = _dict.get('display_rank') - if 'collection_id' in _dict: - args['collection_id'] = _dict.get('collection_id') - else: - raise ValueError( - 'Required property \'collection_id\' not present in EventData JSON' - ) - if 'document_id' in _dict: - args['document_id'] = _dict.get('document_id') - else: - raise ValueError( - 'Required property \'document_id\' not present in EventData JSON' - ) - if 'query_id' in _dict: - args['query_id'] = _dict.get('query_id') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'environment_id') and self.environment_id is not None: - _dict['environment_id'] = self.environment_id - if hasattr(self, 'session_token') and self.session_token is not None: - _dict['session_token'] = self.session_token - if hasattr(self, - 'client_timestamp') and self.client_timestamp is not None: - _dict['client_timestamp'] = datetime_to_string( - self.client_timestamp) - if hasattr(self, 'display_rank') and self.display_rank is not None: - _dict['display_rank'] = self.display_rank - if hasattr(self, 'collection_id') and self.collection_id is not None: - _dict['collection_id'] = self.collection_id - if hasattr(self, 'document_id') and self.document_id is not None: - _dict['document_id'] = self.document_id - if hasattr(self, 'query_id') and self.query_id is not None: - _dict['query_id'] = self.query_id - return _dict - - def __str__(self): - """Return a `str` version of this EventData object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Expansion(object): - """ - An expansion definition. Each object respresents one set of expandable strings. For - example, you could have expansions for the word `hot` in one object, and expansions - for the word `cold` in another. - - :attr list[str] input_terms: (optional) A list of terms that will be expanded for this - expansion. If specified, only the items in this list are expanded. - :attr list[str] expanded_terms: A list of terms that this expansion will be expanded - to. If specified without **input_terms**, it also functions as the input term list. - """ - - def __init__(self, expanded_terms, input_terms=None): - """ - Initialize a Expansion object. 
- - :param list[str] expanded_terms: A list of terms that this expansion will be - expanded to. If specified without **input_terms**, it also functions as the input - term list. - :param list[str] input_terms: (optional) A list of terms that will be expanded for - this expansion. If specified, only the items in this list are expanded. - """ - self.input_terms = input_terms - self.expanded_terms = expanded_terms - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Expansion object from a json dictionary.""" - args = {} - if 'input_terms' in _dict: - args['input_terms'] = _dict.get('input_terms') - if 'expanded_terms' in _dict: - args['expanded_terms'] = _dict.get('expanded_terms') - else: - raise ValueError( - 'Required property \'expanded_terms\' not present in Expansion JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'input_terms') and self.input_terms is not None: - _dict['input_terms'] = self.input_terms - if hasattr(self, 'expanded_terms') and self.expanded_terms is not None: - _dict['expanded_terms'] = self.expanded_terms - return _dict - - def __str__(self): - """Return a `str` version of this Expansion object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Expansions(object): - """ - The query expansion definitions for the specified collection. - - :attr list[Expansion] expansions: An array of query expansion definitions. - Each object in the **expansions** array represents a term or set of terms that will - be expanded into other terms. Each expansion object can be configured as bidirectional - or unidirectional. Bidirectional means that all terms are expanded to all other terms - in the object. Unidirectional means that a set list of terms can be expanded into a - second list of terms. - To create a bi-directional expansion specify an **expanded_terms** array. When found - in a query, all items in the **expanded_terms** array are then expanded to the other - items in the same array. - To create a uni-directional expansion, specify both an array of **input_terms** and - an array of **expanded_terms**. When items in the **input_terms** array are present in - a query, they are expanded using the items listed in the **expanded_terms** array. - """ - - def __init__(self, expansions): - """ - Initialize a Expansions object. - - :param list[Expansion] expansions: An array of query expansion definitions. - Each object in the **expansions** array represents a term or set of terms that - will be expanded into other terms. Each expansion object can be configured as - bidirectional or unidirectional. Bidirectional means that all terms are expanded - to all other terms in the object. Unidirectional means that a set list of terms - can be expanded into a second list of terms. - To create a bi-directional expansion specify an **expanded_terms** array. When - found in a query, all items in the **expanded_terms** array are then expanded to - the other items in the same array. - To create a uni-directional expansion, specify both an array of **input_terms** - and an array of **expanded_terms**. 
When items in the **input_terms** array are - present in a query, they are expanded using the items listed in the - **expanded_terms** array. - """ - self.expansions = expansions - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Expansions object from a json dictionary.""" - args = {} - if 'expansions' in _dict: - args['expansions'] = [ - Expansion._from_dict(x) for x in (_dict.get('expansions')) - ] - else: - raise ValueError( - 'Required property \'expansions\' not present in Expansions JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'expansions') and self.expansions is not None: - _dict['expansions'] = [x._to_dict() for x in self.expansions] - return _dict - - def __str__(self): - """Return a `str` version of this Expansions object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Field(object): - """ - Field. - - :attr str field_name: (optional) The name of the field. - :attr str field_type: (optional) The type of the field. - """ - - def __init__(self, field_name=None, field_type=None): - """ - Initialize a Field object. - - :param str field_name: (optional) The name of the field. - :param str field_type: (optional) The type of the field. - """ - self.field_name = field_name - self.field_type = field_type - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Field object from a json dictionary.""" - args = {} - if 'field' in _dict or 'field_name' in _dict: - args['field_name'] = _dict.get('field') or _dict.get('field_name') - if 'type' in _dict or 'field_type' in _dict: - args['field_type'] = _dict.get('type') or _dict.get('field_type') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'field_name') and self.field_name is not None: - _dict['field'] = self.field_name - if hasattr(self, 'field_type') and self.field_type is not None: - _dict['type'] = self.field_type - return _dict - - def __str__(self): - """Return a `str` version of this Field object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Filter(object): - """ - Filter. - - :attr str match: (optional) The match the aggregated results queried for. - """ - - def __init__(self, - type=None, - results=None, - matching_results=None, - aggregations=None, - match=None): - """ - Initialize a Filter object. - - :param str type: (optional) The type of aggregation command used. For example: - term, filter, max, min, etc. - :param list[AggregationResult] results: (optional) Array of aggregation results. - :param int matching_results: (optional) Number of matching results. - :param list[QueryAggregation] aggregations: (optional) Aggregations returned by - the Discovery service. - :param str match: (optional) The match the aggregated results queried for. 
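# Illustrative sketch of the bidirectional and unidirectional expansion
# definitions described in the Expansions docstring above. The terms are
# hypothetical examples.
expansions_body = {
    "expansions": [
        # Bidirectional: any of these terms expands to all of the others.
        {"expanded_terms": ["car", "automobile", "vehicle"]},
        # Unidirectional: a query containing "ibm" is also expanded to the
        # terms below, but not the other way around.
        {"input_terms": ["ibm"],
         "expanded_terms": ["International Business Machines", "big blue"]},
    ]
}

for exp in expansions_body["expansions"]:
    kind = "unidirectional" if exp.get("input_terms") else "bidirectional"
    print("%s -> %s" % (kind, exp["expanded_terms"]))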
- """ - self.match = match - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Filter object from a json dictionary.""" - args = {} - if 'match' in _dict: - args['match'] = _dict.get('match') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'match') and self.match is not None: - _dict['match'] = self.match - return _dict - - def __str__(self): - """Return a `str` version of this Filter object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class FontSetting(object): - """ - FontSetting. - - :attr int level: (optional) The HTML heading level that any content with the matching - font will be converted to. - :attr int min_size: (optional) The minimum size of the font to match. - :attr int max_size: (optional) The maximum size of the font to match. - :attr bool bold: (optional) When `true`, the font is matched if it is bold. - :attr bool italic: (optional) When `true`, the font is matched if it is italic. - :attr str name: (optional) The name of the font. - """ - - def __init__(self, - level=None, - min_size=None, - max_size=None, - bold=None, - italic=None, - name=None): - """ - Initialize a FontSetting object. - - :param int level: (optional) The HTML heading level that any content with the - matching font will be converted to. - :param int min_size: (optional) The minimum size of the font to match. - :param int max_size: (optional) The maximum size of the font to match. - :param bool bold: (optional) When `true`, the font is matched if it is bold. - :param bool italic: (optional) When `true`, the font is matched if it is italic. - :param str name: (optional) The name of the font. 
- """ - self.level = level - self.min_size = min_size - self.max_size = max_size - self.bold = bold - self.italic = italic - self.name = name - - @classmethod - def _from_dict(cls, _dict): - """Initialize a FontSetting object from a json dictionary.""" - args = {} - if 'level' in _dict: - args['level'] = _dict.get('level') - if 'min_size' in _dict: - args['min_size'] = _dict.get('min_size') - if 'max_size' in _dict: - args['max_size'] = _dict.get('max_size') - if 'bold' in _dict: - args['bold'] = _dict.get('bold') - if 'italic' in _dict: - args['italic'] = _dict.get('italic') - if 'name' in _dict: - args['name'] = _dict.get('name') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'level') and self.level is not None: - _dict['level'] = self.level - if hasattr(self, 'min_size') and self.min_size is not None: - _dict['min_size'] = self.min_size - if hasattr(self, 'max_size') and self.max_size is not None: - _dict['max_size'] = self.max_size - if hasattr(self, 'bold') and self.bold is not None: - _dict['bold'] = self.bold - if hasattr(self, 'italic') and self.italic is not None: - _dict['italic'] = self.italic - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - return _dict - - def __str__(self): - """Return a `str` version of this FontSetting object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Gateway(object): - """ - Object describing a specific gateway. - - :attr str gateway_id: (optional) The gateway ID of the gateway. - :attr str name: (optional) The user defined name of the gateway. - :attr str status: (optional) The current status of the gateway. `connected` means the - gateway is connected to the remotly installed gateway. `idle` means this gateway is - not currently in use. - :attr str token: (optional) The generated **token** for this gateway. The value of - this field is used when configuring the remotly installed gateway. - :attr str token_id: (optional) The generated **token_id** for this gateway. The value - of this field is used when configuring the remotly installed gateway. - """ - - def __init__(self, - gateway_id=None, - name=None, - status=None, - token=None, - token_id=None): - """ - Initialize a Gateway object. - - :param str gateway_id: (optional) The gateway ID of the gateway. - :param str name: (optional) The user defined name of the gateway. - :param str status: (optional) The current status of the gateway. `connected` means - the gateway is connected to the remotly installed gateway. `idle` means this - gateway is not currently in use. - :param str token: (optional) The generated **token** for this gateway. The value - of this field is used when configuring the remotly installed gateway. - :param str token_id: (optional) The generated **token_id** for this gateway. The - value of this field is used when configuring the remotly installed gateway. 
- """ - self.gateway_id = gateway_id - self.name = name - self.status = status - self.token = token - self.token_id = token_id - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Gateway object from a json dictionary.""" - args = {} - if 'gateway_id' in _dict: - args['gateway_id'] = _dict.get('gateway_id') - if 'name' in _dict: - args['name'] = _dict.get('name') - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'token' in _dict: - args['token'] = _dict.get('token') - if 'token_id' in _dict: - args['token_id'] = _dict.get('token_id') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'gateway_id') and self.gateway_id is not None: - _dict['gateway_id'] = self.gateway_id - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'token') and self.token is not None: - _dict['token'] = self.token - if hasattr(self, 'token_id') and self.token_id is not None: - _dict['token_id'] = self.token_id - return _dict - - def __str__(self): - """Return a `str` version of this Gateway object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class GatewayDelete(object): - """ - Gatway deletion confirmation. - - :attr str gateway_id: (optional) The gateway ID of the deleted gateway. - :attr str status: (optional) The status of the request. - """ - - def __init__(self, gateway_id=None, status=None): - """ - Initialize a GatewayDelete object. - - :param str gateway_id: (optional) The gateway ID of the deleted gateway. - :param str status: (optional) The status of the request. - """ - self.gateway_id = gateway_id - self.status = status - - @classmethod - def _from_dict(cls, _dict): - """Initialize a GatewayDelete object from a json dictionary.""" - args = {} - if 'gateway_id' in _dict: - args['gateway_id'] = _dict.get('gateway_id') - if 'status' in _dict: - args['status'] = _dict.get('status') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'gateway_id') and self.gateway_id is not None: - _dict['gateway_id'] = self.gateway_id - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - return _dict - - def __str__(self): - """Return a `str` version of this GatewayDelete object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class GatewayList(object): - """ - Object containing gateways array. - - :attr list[Gateway] gateways: (optional) Array of configured gateway connections. - """ - - def __init__(self, gateways=None): - """ - Initialize a GatewayList object. - - :param list[Gateway] gateways: (optional) Array of configured gateway connections. 
- """ - self.gateways = gateways - - @classmethod - def _from_dict(cls, _dict): - """Initialize a GatewayList object from a json dictionary.""" - args = {} - if 'gateways' in _dict: - args['gateways'] = [ - Gateway._from_dict(x) for x in (_dict.get('gateways')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'gateways') and self.gateways is not None: - _dict['gateways'] = [x._to_dict() for x in self.gateways] - return _dict - - def __str__(self): - """Return a `str` version of this GatewayList object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Histogram(object): - """ - Histogram. - - :attr str field: (optional) The field where the aggregation is located in the - document. - :attr int interval: (optional) Interval of the aggregation. (For 'histogram' type). - """ - - def __init__(self, - type=None, - results=None, - matching_results=None, - aggregations=None, - field=None, - interval=None): - """ - Initialize a Histogram object. - - :param str type: (optional) The type of aggregation command used. For example: - term, filter, max, min, etc. - :param list[AggregationResult] results: (optional) Array of aggregation results. - :param int matching_results: (optional) Number of matching results. - :param list[QueryAggregation] aggregations: (optional) Aggregations returned by - the Discovery service. - :param str field: (optional) The field where the aggregation is located in the - document. - :param int interval: (optional) Interval of the aggregation. (For 'histogram' - type). - """ - self.field = field - self.interval = interval - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Histogram object from a json dictionary.""" - args = {} - if 'field' in _dict: - args['field'] = _dict.get('field') - if 'interval' in _dict: - args['interval'] = _dict.get('interval') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'field') and self.field is not None: - _dict['field'] = self.field - if hasattr(self, 'interval') and self.interval is not None: - _dict['interval'] = self.interval - return _dict - - def __str__(self): - """Return a `str` version of this Histogram object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class HtmlSettings(object): - """ - A list of HTML conversion settings. - - :attr list[str] exclude_tags_completely: (optional) Array of HTML tags that are - excluded completely. - :attr list[str] exclude_tags_keep_content: (optional) Array of HTML tags which are - excluded but still retain content. - :attr XPathPatterns keep_content: (optional) - :attr XPathPatterns exclude_content: (optional) - :attr list[str] keep_tag_attributes: (optional) An array of HTML tag attributes to - keep in the converted document. 
- :attr list[str] exclude_tag_attributes: (optional) Array of HTML tag attributes to - exclude. - """ - - def __init__(self, - exclude_tags_completely=None, - exclude_tags_keep_content=None, - keep_content=None, - exclude_content=None, - keep_tag_attributes=None, - exclude_tag_attributes=None): - """ - Initialize a HtmlSettings object. - - :param list[str] exclude_tags_completely: (optional) Array of HTML tags that are - excluded completely. - :param list[str] exclude_tags_keep_content: (optional) Array of HTML tags which - are excluded but still retain content. - :param XPathPatterns keep_content: (optional) - :param XPathPatterns exclude_content: (optional) - :param list[str] keep_tag_attributes: (optional) An array of HTML tag attributes - to keep in the converted document. - :param list[str] exclude_tag_attributes: (optional) Array of HTML tag attributes - to exclude. - """ - self.exclude_tags_completely = exclude_tags_completely - self.exclude_tags_keep_content = exclude_tags_keep_content - self.keep_content = keep_content - self.exclude_content = exclude_content - self.keep_tag_attributes = keep_tag_attributes - self.exclude_tag_attributes = exclude_tag_attributes - - @classmethod - def _from_dict(cls, _dict): - """Initialize a HtmlSettings object from a json dictionary.""" - args = {} - if 'exclude_tags_completely' in _dict: - args['exclude_tags_completely'] = _dict.get( - 'exclude_tags_completely') - if 'exclude_tags_keep_content' in _dict: - args['exclude_tags_keep_content'] = _dict.get( - 'exclude_tags_keep_content') - if 'keep_content' in _dict: - args['keep_content'] = XPathPatterns._from_dict( - _dict.get('keep_content')) - if 'exclude_content' in _dict: - args['exclude_content'] = XPathPatterns._from_dict( - _dict.get('exclude_content')) - if 'keep_tag_attributes' in _dict: - args['keep_tag_attributes'] = _dict.get('keep_tag_attributes') - if 'exclude_tag_attributes' in _dict: - args['exclude_tag_attributes'] = _dict.get('exclude_tag_attributes') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'exclude_tags_completely' - ) and self.exclude_tags_completely is not None: - _dict['exclude_tags_completely'] = self.exclude_tags_completely - if hasattr(self, 'exclude_tags_keep_content' - ) and self.exclude_tags_keep_content is not None: - _dict['exclude_tags_keep_content'] = self.exclude_tags_keep_content - if hasattr(self, 'keep_content') and self.keep_content is not None: - _dict['keep_content'] = self.keep_content._to_dict() - if hasattr(self, - 'exclude_content') and self.exclude_content is not None: - _dict['exclude_content'] = self.exclude_content._to_dict() - if hasattr( - self, - 'keep_tag_attributes') and self.keep_tag_attributes is not None: - _dict['keep_tag_attributes'] = self.keep_tag_attributes - if hasattr(self, 'exclude_tag_attributes' - ) and self.exclude_tag_attributes is not None: - _dict['exclude_tag_attributes'] = self.exclude_tag_attributes - return _dict - - def __str__(self): - """Return a `str` version of this HtmlSettings object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class IndexCapacity(object): - """ - Details about the resource 
usage and capacity of the environment. - - :attr EnvironmentDocuments documents: (optional) Summary of the document usage - statistics for the environment. - :attr DiskUsage disk_usage: (optional) Summary of the disk usage statistics for the - environment. - :attr CollectionUsage collections: (optional) Summary of the collection usage in the - environment. - """ - - def __init__(self, documents=None, disk_usage=None, collections=None): - """ - Initialize a IndexCapacity object. - - :param EnvironmentDocuments documents: (optional) Summary of the document usage - statistics for the environment. - :param DiskUsage disk_usage: (optional) Summary of the disk usage statistics for - the environment. - :param CollectionUsage collections: (optional) Summary of the collection usage in - the environment. - """ - self.documents = documents - self.disk_usage = disk_usage - self.collections = collections - - @classmethod - def _from_dict(cls, _dict): - """Initialize a IndexCapacity object from a json dictionary.""" - args = {} - if 'documents' in _dict: - args['documents'] = EnvironmentDocuments._from_dict( - _dict.get('documents')) - if 'disk_usage' in _dict: - args['disk_usage'] = DiskUsage._from_dict(_dict.get('disk_usage')) - if 'collections' in _dict: - args['collections'] = CollectionUsage._from_dict( - _dict.get('collections')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'documents') and self.documents is not None: - _dict['documents'] = self.documents._to_dict() - if hasattr(self, 'disk_usage') and self.disk_usage is not None: - _dict['disk_usage'] = self.disk_usage._to_dict() - if hasattr(self, 'collections') and self.collections is not None: - _dict['collections'] = self.collections._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this IndexCapacity object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ListCollectionFieldsResponse(object): - """ - The list of fetched fields. - The fields are returned using a fully qualified name format, however, the format - differs slightly from that used by the query operations. - * Fields which contain nested JSON objects are assigned a type of "nested". - * Fields which belong to a nested object are prefixed with `.properties` (for - example, `warnings.properties.severity` means that the `warnings` object has a - property called `severity`). - * Fields returned from the News collection are prefixed with - `v{N}-fullnews-t3-{YEAR}.mappings` (for example, - `v5-fullnews-t3-2016.mappings.text.properties.author`). - - :attr list[Field] fields: (optional) An array containing information about each field - in the collections. - """ - - def __init__(self, fields=None): - """ - Initialize a ListCollectionFieldsResponse object. - - :param list[Field] fields: (optional) An array containing information about each - field in the collections. 
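# Illustrative sketch: listing field name/type pairs from a hypothetical
# "list collection fields" response. The nested-field naming ("nested" type,
# ".properties" prefix) follows the ListCollectionFieldsResponse docstring
# above; the data itself is made up.
fields_response = {
    "fields": [
        {"field": "text", "type": "string"},
        {"field": "warnings", "type": "nested"},
        {"field": "warnings.properties.severity", "type": "string"},
    ]
}

for f in fields_response["fields"]:
    print("%-35s %s" % (f["field"], f["type"]))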
- """ - self.fields = fields - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ListCollectionFieldsResponse object from a json dictionary.""" - args = {} - if 'fields' in _dict: - args['fields'] = [ - Field._from_dict(x) for x in (_dict.get('fields')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'fields') and self.fields is not None: - _dict['fields'] = [x._to_dict() for x in self.fields] - return _dict - - def __str__(self): - """Return a `str` version of this ListCollectionFieldsResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ListCollectionsResponse(object): - """ - ListCollectionsResponse. - - :attr list[Collection] collections: (optional) An array containing information about - each collection in the environment. - """ - - def __init__(self, collections=None): - """ - Initialize a ListCollectionsResponse object. - - :param list[Collection] collections: (optional) An array containing information - about each collection in the environment. - """ - self.collections = collections - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ListCollectionsResponse object from a json dictionary.""" - args = {} - if 'collections' in _dict: - args['collections'] = [ - Collection._from_dict(x) for x in (_dict.get('collections')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'collections') and self.collections is not None: - _dict['collections'] = [x._to_dict() for x in self.collections] - return _dict - - def __str__(self): - """Return a `str` version of this ListCollectionsResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ListConfigurationsResponse(object): - """ - ListConfigurationsResponse. - - :attr list[Configuration] configurations: (optional) An array of Configurations that - are available for the service instance. - """ - - def __init__(self, configurations=None): - """ - Initialize a ListConfigurationsResponse object. - - :param list[Configuration] configurations: (optional) An array of Configurations - that are available for the service instance. 
- """ - self.configurations = configurations - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ListConfigurationsResponse object from a json dictionary.""" - args = {} - if 'configurations' in _dict: - args['configurations'] = [ - Configuration._from_dict(x) - for x in (_dict.get('configurations')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'configurations') and self.configurations is not None: - _dict['configurations'] = [ - x._to_dict() for x in self.configurations - ] - return _dict - - def __str__(self): - """Return a `str` version of this ListConfigurationsResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ListEnvironmentsResponse(object): - """ - ListEnvironmentsResponse. - - :attr list[Environment] environments: (optional) An array of [environments] that are - available for the service instance. - """ - - def __init__(self, environments=None): - """ - Initialize a ListEnvironmentsResponse object. - - :param list[Environment] environments: (optional) An array of [environments] that - are available for the service instance. - """ - self.environments = environments - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ListEnvironmentsResponse object from a json dictionary.""" - args = {} - if 'environments' in _dict: - args['environments'] = [ - Environment._from_dict(x) for x in (_dict.get('environments')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'environments') and self.environments is not None: - _dict['environments'] = [x._to_dict() for x in self.environments] - return _dict - - def __str__(self): - """Return a `str` version of this ListEnvironmentsResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class LogQueryResponse(object): - """ - Object containing results that match the requested **logs** query. - - :attr int matching_results: (optional) Number of matching results. - :attr list[LogQueryResponseResult] results: (optional) Array of log query response - results. - """ - - def __init__(self, matching_results=None, results=None): - """ - Initialize a LogQueryResponse object. - - :param int matching_results: (optional) Number of matching results. - :param list[LogQueryResponseResult] results: (optional) Array of log query - response results. 
- """ - self.matching_results = matching_results - self.results = results - - @classmethod - def _from_dict(cls, _dict): - """Initialize a LogQueryResponse object from a json dictionary.""" - args = {} - if 'matching_results' in _dict: - args['matching_results'] = _dict.get('matching_results') - if 'results' in _dict: - args['results'] = [ - LogQueryResponseResult._from_dict(x) - for x in (_dict.get('results')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'matching_results') and self.matching_results is not None: - _dict['matching_results'] = self.matching_results - if hasattr(self, 'results') and self.results is not None: - _dict['results'] = [x._to_dict() for x in self.results] - return _dict - - def __str__(self): - """Return a `str` version of this LogQueryResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class LogQueryResponseResult(object): - """ - Individual result object for a **logs** query. Each object represents either a query - to a Discovery collection or an event that is associated with a query. - - :attr str environment_id: (optional) The environment ID that is associated with this - log entry. - :attr str customer_id: (optional) The **customer_id** label that was specified in the - header of the query or event API call that corresponds to this log entry. - :attr str document_type: (optional) The type of log entry returned. - **query** indicates that the log represents the results of a call to the single - collection **query** method. - **event** indicates that the log represents a call to the **events** API. - :attr str natural_language_query: (optional) The value of the - **natural_language_query** query parameter that was used to create these results. Only - returned with logs of type **query**. - **Note:** Other query parameters (such as **filter** or **deduplicate**) might have - been used with this query, but are not recorded. - :attr LogQueryResponseResultDocuments document_results: (optional) Object containing - result information that was returned by the query used to create this log entry. Only - returned with logs of type `query`. - :attr datetime created_timestamp: (optional) Date that the log result was created. - Returned in `YYYY-MM-DDThh:mm:ssZ` format. - :attr datetime client_timestamp: (optional) Date specified by the user when recording - an event. Returned in `YYYY-MM-DDThh:mm:ssZ` format. Only returned with logs of type - **event**. - :attr str query_id: (optional) Identifier that corresponds to the - **natural_language_query** string used in the original or associated query. All - **event** and **query** log entries that have the same original - **natural_language_query** string also have them same **query_id**. This field can be - used to recall all **event** and **query** log results that have the same original - query (**event** logs do not contain the original **natural_language_query** field). - :attr str session_token: (optional) Unique identifier (within a 24-hour period) that - identifies a single `query` log and any `event` logs that were created for it. 
- **Note:** If the exact same query is run at the exact same time on different days, the - **session_token** for those queries might be identical. However, the - **created_timestamp** differs. - **Note:** Session tokens are case sensitive. To avoid matching on session tokens that - are identical except for case, use the exact match operator (`::`) when you query for - a specific session token. - :attr str collection_id: (optional) The collection ID of the document associated with - this event. Only returned with logs of type `event`. - :attr int display_rank: (optional) The original display rank of the document - associated with this event. Only returned with logs of type `event`. - :attr str document_id: (optional) The document ID of the document associated with this - event. Only returned with logs of type `event`. - :attr str event_type: (optional) The type of event that this object respresents. - Possible values are - - `query` the log of a query to a collection - - `click` the result of a call to the **events** endpoint. - :attr str result_type: (optional) The type of result that this **event** is associated - with. Only returned with logs of type `event`. - """ - - def __init__(self, - environment_id=None, - customer_id=None, - document_type=None, - natural_language_query=None, - document_results=None, - created_timestamp=None, - client_timestamp=None, - query_id=None, - session_token=None, - collection_id=None, - display_rank=None, - document_id=None, - event_type=None, - result_type=None): - """ - Initialize a LogQueryResponseResult object. - - :param str environment_id: (optional) The environment ID that is associated with - this log entry. - :param str customer_id: (optional) The **customer_id** label that was specified in - the header of the query or event API call that corresponds to this log entry. - :param str document_type: (optional) The type of log entry returned. - **query** indicates that the log represents the results of a call to the single - collection **query** method. - **event** indicates that the log represents a call to the **events** API. - :param str natural_language_query: (optional) The value of the - **natural_language_query** query parameter that was used to create these results. - Only returned with logs of type **query**. - **Note:** Other query parameters (such as **filter** or **deduplicate**) might - have been used with this query, but are not recorded. - :param LogQueryResponseResultDocuments document_results: (optional) Object - containing result information that was returned by the query used to create this - log entry. Only returned with logs of type `query`. - :param datetime created_timestamp: (optional) Date that the log result was - created. Returned in `YYYY-MM-DDThh:mm:ssZ` format. - :param datetime client_timestamp: (optional) Date specified by the user when - recording an event. Returned in `YYYY-MM-DDThh:mm:ssZ` format. Only returned with - logs of type **event**. - :param str query_id: (optional) Identifier that corresponds to the - **natural_language_query** string used in the original or associated query. All - **event** and **query** log entries that have the same original - **natural_language_query** string also have them same **query_id**. This field can - be used to recall all **event** and **query** log results that have the same - original query (**event** logs do not contain the original - **natural_language_query** field). 
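# Illustrative sketch: building the exact-match filter recommended in the
# session_token description above. Session tokens are case sensitive, so the
# `::` operator avoids matching tokens that differ only in case. The token
# value is a placeholder.
session_token = "1_xyzABC"

log_filter = "session_token::%s" % session_token
print(log_filter)   # e.g. used as the filter for a subsequent logs query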
- :param str session_token: (optional) Unique identifier (within a 24-hour period) - that identifies a single `query` log and any `event` logs that were created for - it. - **Note:** If the exact same query is run at the exact same time on different days, - the **session_token** for those queries might be identical. However, the - **created_timestamp** differs. - **Note:** Session tokens are case sensitive. To avoid matching on session tokens - that are identical except for case, use the exact match operator (`::`) when you - query for a specific session token. - :param str collection_id: (optional) The collection ID of the document associated - with this event. Only returned with logs of type `event`. - :param int display_rank: (optional) The original display rank of the document - associated with this event. Only returned with logs of type `event`. - :param str document_id: (optional) The document ID of the document associated with - this event. Only returned with logs of type `event`. - :param str event_type: (optional) The type of event that this object respresents. - Possible values are - - `query` the log of a query to a collection - - `click` the result of a call to the **events** endpoint. - :param str result_type: (optional) The type of result that this **event** is - associated with. Only returned with logs of type `event`. - """ - self.environment_id = environment_id - self.customer_id = customer_id - self.document_type = document_type - self.natural_language_query = natural_language_query - self.document_results = document_results - self.created_timestamp = created_timestamp - self.client_timestamp = client_timestamp - self.query_id = query_id - self.session_token = session_token - self.collection_id = collection_id - self.display_rank = display_rank - self.document_id = document_id - self.event_type = event_type - self.result_type = result_type - - @classmethod - def _from_dict(cls, _dict): - """Initialize a LogQueryResponseResult object from a json dictionary.""" - args = {} - if 'environment_id' in _dict: - args['environment_id'] = _dict.get('environment_id') - if 'customer_id' in _dict: - args['customer_id'] = _dict.get('customer_id') - if 'document_type' in _dict: - args['document_type'] = _dict.get('document_type') - if 'natural_language_query' in _dict: - args['natural_language_query'] = _dict.get('natural_language_query') - if 'document_results' in _dict: - args[ - 'document_results'] = LogQueryResponseResultDocuments._from_dict( - _dict.get('document_results')) - if 'created_timestamp' in _dict: - args['created_timestamp'] = string_to_datetime( - _dict.get('created_timestamp')) - if 'client_timestamp' in _dict: - args['client_timestamp'] = string_to_datetime( - _dict.get('client_timestamp')) - if 'query_id' in _dict: - args['query_id'] = _dict.get('query_id') - if 'session_token' in _dict: - args['session_token'] = _dict.get('session_token') - if 'collection_id' in _dict: - args['collection_id'] = _dict.get('collection_id') - if 'display_rank' in _dict: - args['display_rank'] = _dict.get('display_rank') - if 'document_id' in _dict: - args['document_id'] = _dict.get('document_id') - if 'event_type' in _dict: - args['event_type'] = _dict.get('event_type') - if 'result_type' in _dict: - args['result_type'] = _dict.get('result_type') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'environment_id') and self.environment_id is not None: - _dict['environment_id'] = self.environment_id - if 
hasattr(self, 'customer_id') and self.customer_id is not None: - _dict['customer_id'] = self.customer_id - if hasattr(self, 'document_type') and self.document_type is not None: - _dict['document_type'] = self.document_type - if hasattr(self, 'natural_language_query' - ) and self.natural_language_query is not None: - _dict['natural_language_query'] = self.natural_language_query - if hasattr(self, - 'document_results') and self.document_results is not None: - _dict['document_results'] = self.document_results._to_dict() - if hasattr(self, - 'created_timestamp') and self.created_timestamp is not None: - _dict['created_timestamp'] = datetime_to_string( - self.created_timestamp) - if hasattr(self, - 'client_timestamp') and self.client_timestamp is not None: - _dict['client_timestamp'] = datetime_to_string( - self.client_timestamp) - if hasattr(self, 'query_id') and self.query_id is not None: - _dict['query_id'] = self.query_id - if hasattr(self, 'session_token') and self.session_token is not None: - _dict['session_token'] = self.session_token - if hasattr(self, 'collection_id') and self.collection_id is not None: - _dict['collection_id'] = self.collection_id - if hasattr(self, 'display_rank') and self.display_rank is not None: - _dict['display_rank'] = self.display_rank - if hasattr(self, 'document_id') and self.document_id is not None: - _dict['document_id'] = self.document_id - if hasattr(self, 'event_type') and self.event_type is not None: - _dict['event_type'] = self.event_type - if hasattr(self, 'result_type') and self.result_type is not None: - _dict['result_type'] = self.result_type - return _dict - - def __str__(self): - """Return a `str` version of this LogQueryResponseResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class LogQueryResponseResultDocuments(object): - """ - Object containing result information that was returned by the query used to create - this log entry. Only returned with logs of type `query`. - - :attr list[LogQueryResponseResultDocumentsResult] results: (optional) Array of log - query response results. - :attr int count: (optional) The number of results returned in the query associate with - this log. - """ - - def __init__(self, results=None, count=None): - """ - Initialize a LogQueryResponseResultDocuments object. - - :param list[LogQueryResponseResultDocumentsResult] results: (optional) Array of - log query response results. - :param int count: (optional) The number of results returned in the query associate - with this log. 
- """ - self.results = results - self.count = count - - @classmethod - def _from_dict(cls, _dict): - """Initialize a LogQueryResponseResultDocuments object from a json dictionary.""" - args = {} - if 'results' in _dict: - args['results'] = [ - LogQueryResponseResultDocumentsResult._from_dict(x) - for x in (_dict.get('results')) - ] - if 'count' in _dict: - args['count'] = _dict.get('count') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'results') and self.results is not None: - _dict['results'] = [x._to_dict() for x in self.results] - if hasattr(self, 'count') and self.count is not None: - _dict['count'] = self.count - return _dict - - def __str__(self): - """Return a `str` version of this LogQueryResponseResultDocuments object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class LogQueryResponseResultDocumentsResult(object): - """ - Each object in the **results** array corresponds to an individual document returned by - the original query. - - :attr int position: (optional) The result rank of this document. A position of `1` - indicates that it was the first returned result. - :attr str document_id: (optional) The **document_id** of the document that this result - represents. - :attr float score: (optional) The raw score of this result. A higher score indicates a - greater match to the query parameters. - :attr float confidence: (optional) The confidence score of the result's analysis. A - higher score indicating greater confidence. - :attr str collection_id: (optional) The **collection_id** of the document represented - by this result. - """ - - def __init__(self, - position=None, - document_id=None, - score=None, - confidence=None, - collection_id=None): - """ - Initialize a LogQueryResponseResultDocumentsResult object. - - :param int position: (optional) The result rank of this document. A position of - `1` indicates that it was the first returned result. - :param str document_id: (optional) The **document_id** of the document that this - result represents. - :param float score: (optional) The raw score of this result. A higher score - indicates a greater match to the query parameters. - :param float confidence: (optional) The confidence score of the result's analysis. - A higher score indicating greater confidence. - :param str collection_id: (optional) The **collection_id** of the document - represented by this result. 
- """ - self.position = position - self.document_id = document_id - self.score = score - self.confidence = confidence - self.collection_id = collection_id - - @classmethod - def _from_dict(cls, _dict): - """Initialize a LogQueryResponseResultDocumentsResult object from a json dictionary.""" - args = {} - if 'position' in _dict: - args['position'] = _dict.get('position') - if 'document_id' in _dict: - args['document_id'] = _dict.get('document_id') - if 'score' in _dict: - args['score'] = _dict.get('score') - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') - if 'collection_id' in _dict: - args['collection_id'] = _dict.get('collection_id') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'position') and self.position is not None: - _dict['position'] = self.position - if hasattr(self, 'document_id') and self.document_id is not None: - _dict['document_id'] = self.document_id - if hasattr(self, 'score') and self.score is not None: - _dict['score'] = self.score - if hasattr(self, 'confidence') and self.confidence is not None: - _dict['confidence'] = self.confidence - if hasattr(self, 'collection_id') and self.collection_id is not None: - _dict['collection_id'] = self.collection_id - return _dict - - def __str__(self): - """Return a `str` version of this LogQueryResponseResultDocumentsResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class MetricAggregation(object): - """ - An aggregation analyzing log information for queries and events. - - :attr str interval: (optional) The measurement interval for this metric. Metric - intervals are always 1 day (`1d`). - :attr str event_type: (optional) The event type associated with this metric result. - This field, when present, will always be `click`. - :attr list[MetricAggregationResult] results: (optional) Array of metric aggregation - query results. - """ - - def __init__(self, interval=None, event_type=None, results=None): - """ - Initialize a MetricAggregation object. - - :param str interval: (optional) The measurement interval for this metric. Metric - intervals are always 1 day (`1d`). - :param str event_type: (optional) The event type associated with this metric - result. This field, when present, will always be `click`. - :param list[MetricAggregationResult] results: (optional) Array of metric - aggregation query results. 
- """ - self.interval = interval - self.event_type = event_type - self.results = results - - @classmethod - def _from_dict(cls, _dict): - """Initialize a MetricAggregation object from a json dictionary.""" - args = {} - if 'interval' in _dict: - args['interval'] = _dict.get('interval') - if 'event_type' in _dict: - args['event_type'] = _dict.get('event_type') - if 'results' in _dict: - args['results'] = [ - MetricAggregationResult._from_dict(x) - for x in (_dict.get('results')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'interval') and self.interval is not None: - _dict['interval'] = self.interval - if hasattr(self, 'event_type') and self.event_type is not None: - _dict['event_type'] = self.event_type - if hasattr(self, 'results') and self.results is not None: - _dict['results'] = [x._to_dict() for x in self.results] - return _dict - - def __str__(self): - """Return a `str` version of this MetricAggregation object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class MetricAggregationResult(object): - """ - Aggregation result data for the requested metric. - - :attr datetime key_as_string: (optional) Date in string form representing the start of - this interval. - :attr int key: (optional) Unix epoch time equivalent of the **key_as_string**, that - represents the start of this interval. - :attr int matching_results: (optional) Number of matching results. - :attr float event_rate: (optional) The number of queries with associated events - divided by the total number of queries for the interval. Only returned with - **event_rate** metrics. - """ - - def __init__(self, - key_as_string=None, - key=None, - matching_results=None, - event_rate=None): - """ - Initialize a MetricAggregationResult object. - - :param datetime key_as_string: (optional) Date in string form representing the - start of this interval. - :param int key: (optional) Unix epoch time equivalent of the **key_as_string**, - that represents the start of this interval. - :param int matching_results: (optional) Number of matching results. - :param float event_rate: (optional) The number of queries with associated events - divided by the total number of queries for the interval. Only returned with - **event_rate** metrics. 
- """ - self.key_as_string = key_as_string - self.key = key - self.matching_results = matching_results - self.event_rate = event_rate - - @classmethod - def _from_dict(cls, _dict): - """Initialize a MetricAggregationResult object from a json dictionary.""" - args = {} - if 'key_as_string' in _dict: - args['key_as_string'] = string_to_datetime( - _dict.get('key_as_string')) - if 'key' in _dict: - args['key'] = _dict.get('key') - if 'matching_results' in _dict: - args['matching_results'] = _dict.get('matching_results') - if 'event_rate' in _dict: - args['event_rate'] = _dict.get('event_rate') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'key_as_string') and self.key_as_string is not None: - _dict['key_as_string'] = datetime_to_string(self.key_as_string) - if hasattr(self, 'key') and self.key is not None: - _dict['key'] = self.key - if hasattr(self, - 'matching_results') and self.matching_results is not None: - _dict['matching_results'] = self.matching_results - if hasattr(self, 'event_rate') and self.event_rate is not None: - _dict['event_rate'] = self.event_rate - return _dict - - def __str__(self): - """Return a `str` version of this MetricAggregationResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class MetricResponse(object): - """ - The response generated from a call to a **metrics** method. - - :attr list[MetricAggregation] aggregations: (optional) Array of metric aggregations. - """ - - def __init__(self, aggregations=None): - """ - Initialize a MetricResponse object. - - :param list[MetricAggregation] aggregations: (optional) Array of metric - aggregations. - """ - self.aggregations = aggregations - - @classmethod - def _from_dict(cls, _dict): - """Initialize a MetricResponse object from a json dictionary.""" - args = {} - if 'aggregations' in _dict: - args['aggregations'] = [ - MetricAggregation._from_dict(x) - for x in (_dict.get('aggregations')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'aggregations') and self.aggregations is not None: - _dict['aggregations'] = [x._to_dict() for x in self.aggregations] - return _dict - - def __str__(self): - """Return a `str` version of this MetricResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class MetricTokenAggregation(object): - """ - An aggregation analyzing log information for queries and events. - - :attr str event_type: (optional) The event type associated with this metric result. - This field, when present, will always be `click`. - :attr list[MetricTokenAggregationResult] results: (optional) Array of results for the - metric token aggregation. - """ - - def __init__(self, event_type=None, results=None): - """ - Initialize a MetricTokenAggregation object. 
- - :param str event_type: (optional) The event type associated with this metric - result. This field, when present, will always be `click`. - :param list[MetricTokenAggregationResult] results: (optional) Array of results for - the metric token aggregation. - """ - self.event_type = event_type - self.results = results - - @classmethod - def _from_dict(cls, _dict): - """Initialize a MetricTokenAggregation object from a json dictionary.""" - args = {} - if 'event_type' in _dict: - args['event_type'] = _dict.get('event_type') - if 'results' in _dict: - args['results'] = [ - MetricTokenAggregationResult._from_dict(x) - for x in (_dict.get('results')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'event_type') and self.event_type is not None: - _dict['event_type'] = self.event_type - if hasattr(self, 'results') and self.results is not None: - _dict['results'] = [x._to_dict() for x in self.results] - return _dict - - def __str__(self): - """Return a `str` version of this MetricTokenAggregation object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class MetricTokenAggregationResult(object): - """ - Aggregation result data for the requested metric. - - :attr str key: (optional) The content of the **natural_language_query** parameter used - in the query that this result represents. - :attr int matching_results: (optional) Number of matching results. - :attr float event_rate: (optional) The number of queries with associated events - divided by the total number of queries currently stored (queries and events are stored - in the log for 30 days). - """ - - def __init__(self, key=None, matching_results=None, event_rate=None): - """ - Initialize a MetricTokenAggregationResult object. - - :param str key: (optional) The content of the **natural_language_query** parameter - used in the query that this result represents. - :param int matching_results: (optional) Number of matching results. - :param float event_rate: (optional) The number of queries with associated events - divided by the total number of queries currently stored (queries and events are - stored in the log for 30 days). 
- """ - self.key = key - self.matching_results = matching_results - self.event_rate = event_rate - - @classmethod - def _from_dict(cls, _dict): - """Initialize a MetricTokenAggregationResult object from a json dictionary.""" - args = {} - if 'key' in _dict: - args['key'] = _dict.get('key') - if 'matching_results' in _dict: - args['matching_results'] = _dict.get('matching_results') - if 'event_rate' in _dict: - args['event_rate'] = _dict.get('event_rate') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'key') and self.key is not None: - _dict['key'] = self.key - if hasattr(self, - 'matching_results') and self.matching_results is not None: - _dict['matching_results'] = self.matching_results - if hasattr(self, 'event_rate') and self.event_rate is not None: - _dict['event_rate'] = self.event_rate - return _dict - - def __str__(self): - """Return a `str` version of this MetricTokenAggregationResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class MetricTokenResponse(object): - """ - The response generated from a call to a **metrics** method that evaluates tokens. - - :attr list[MetricTokenAggregation] aggregations: (optional) Array of metric token - aggregations. - """ - - def __init__(self, aggregations=None): - """ - Initialize a MetricTokenResponse object. - - :param list[MetricTokenAggregation] aggregations: (optional) Array of metric token - aggregations. - """ - self.aggregations = aggregations - - @classmethod - def _from_dict(cls, _dict): - """Initialize a MetricTokenResponse object from a json dictionary.""" - args = {} - if 'aggregations' in _dict: - args['aggregations'] = [ - MetricTokenAggregation._from_dict(x) - for x in (_dict.get('aggregations')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'aggregations') and self.aggregations is not None: - _dict['aggregations'] = [x._to_dict() for x in self.aggregations] - return _dict - - def __str__(self): - """Return a `str` version of this MetricTokenResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Nested(object): - """ - Nested. - - :attr str path: (optional) The area of the results the aggregation was restricted to. - """ - - def __init__(self, - type=None, - results=None, - matching_results=None, - aggregations=None, - path=None): - """ - Initialize a Nested object. - - :param str type: (optional) The type of aggregation command used. For example: - term, filter, max, min, etc. - :param list[AggregationResult] results: (optional) Array of aggregation results. - :param int matching_results: (optional) Number of matching results. - :param list[QueryAggregation] aggregations: (optional) Aggregations returned by - the Discovery service. 
- :param str path: (optional) The area of the results the aggregation was restricted - to. - """ - self.path = path - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Nested object from a json dictionary.""" - args = {} - if 'path' in _dict: - args['path'] = _dict.get('path') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'path') and self.path is not None: - _dict['path'] = self.path - return _dict - - def __str__(self): - """Return a `str` version of this Nested object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class NluEnrichmentCategories(object): - """ - An object that indicates the Categories enrichment will be applied to the specified - field. - - """ - - def __init__(self, **kwargs): - """ - Initialize a NluEnrichmentCategories object. - - :param **kwargs: (optional) Any additional properties. - """ - for _key, _value in kwargs.items(): - setattr(self, _key, _value) - - @classmethod - def _from_dict(cls, _dict): - """Initialize a NluEnrichmentCategories object from a json dictionary.""" - args = {} - xtra = _dict.copy() - args.update(xtra) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value - return _dict - - def __setattr__(self, name, value): - properties = {} - if not hasattr(self, '_additionalProperties'): - super(NluEnrichmentCategories, self).__setattr__( - '_additionalProperties', set()) - if name not in properties: - self._additionalProperties.add(name) - super(NluEnrichmentCategories, self).__setattr__(name, value) - - def __str__(self): - """Return a `str` version of this NluEnrichmentCategories object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class NluEnrichmentConcepts(object): - """ - An object specifiying the concepts enrichment and related parameters. - - :attr int limit: (optional) The maximum number of concepts enrichments to extact from - each instance of the specified field. - """ - - def __init__(self, limit=None): - """ - Initialize a NluEnrichmentConcepts object. - - :param int limit: (optional) The maximum number of concepts enrichments to extact - from each instance of the specified field. 
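
NluEnrichmentCategories is the one model here without fixed attributes: its `__setattr__` records every attribute name in `_additionalProperties`, so arbitrary keyword arguments survive `_to_dict`. A standalone mirror of that technique, for illustration only:

class _FreeForm(object):
    """Standalone mirror of the NluEnrichmentCategories pattern (illustration only)."""

    def __init__(self, **kwargs):
        for _key, _value in kwargs.items():
            setattr(self, _key, _value)

    def __setattr__(self, name, value):
        # Record every attribute name so _to_dict() can emit arbitrary keys.
        if not hasattr(self, '_additionalProperties'):
            super(_FreeForm, self).__setattr__('_additionalProperties', set())
        self._additionalProperties.add(name)
        super(_FreeForm, self).__setattr__(name, value)

    def _to_dict(self):
        return {key: getattr(self, key) for key in self._additionalProperties
                if getattr(self, key) is not None}

print(_FreeForm(limit=3, model='custom')._to_dict())  # key order may vary
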
- """ - self.limit = limit - - @classmethod - def _from_dict(cls, _dict): - """Initialize a NluEnrichmentConcepts object from a json dictionary.""" - args = {} - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'limit') and self.limit is not None: - _dict['limit'] = self.limit - return _dict - - def __str__(self): - """Return a `str` version of this NluEnrichmentConcepts object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class NluEnrichmentEmotion(object): - """ - An object specifying the emotion detection enrichment and related parameters. - - :attr bool document: (optional) When `true`, emotion detection is performed on the - entire field. - :attr list[str] targets: (optional) A comma-separated list of target strings that will - have any associated emotions detected. - """ - - def __init__(self, document=None, targets=None): - """ - Initialize a NluEnrichmentEmotion object. - - :param bool document: (optional) When `true`, emotion detection is performed on - the entire field. - :param list[str] targets: (optional) A comma-separated list of target strings that - will have any associated emotions detected. - """ - self.document = document - self.targets = targets - - @classmethod - def _from_dict(cls, _dict): - """Initialize a NluEnrichmentEmotion object from a json dictionary.""" - args = {} - if 'document' in _dict: - args['document'] = _dict.get('document') - if 'targets' in _dict: - args['targets'] = _dict.get('targets') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document') and self.document is not None: - _dict['document'] = self.document - if hasattr(self, 'targets') and self.targets is not None: - _dict['targets'] = self.targets - return _dict - - def __str__(self): - """Return a `str` version of this NluEnrichmentEmotion object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class NluEnrichmentEntities(object): - """ - An object speficying the Entities enrichment and related parameters. - - :attr bool sentiment: (optional) When `true`, sentiment analysis of entities will be - performed on the specified field. - :attr bool emotion: (optional) When `true`, emotion detection of entities will be - performed on the specified field. - :attr int limit: (optional) The maximum number of entities to extract for each - instance of the specified field. - :attr bool mentions: (optional) When `true`, the number of mentions of each identified - entity is recorded. The default is `false`. - :attr bool mention_types: (optional) When `true`, the types of mentions for each - idetifieid entity is recorded. The default is `false`. 
- :attr bool sentence_locations: (optional) When `true`, a list of sentence locations - for each instance of each identified entity is recorded. The default is `false`. - :attr str model: (optional) The enrichement model to use with entity extraction. May - be a custom model provided by Watson Knowledge Studio, the public model for use with - Knowledge Graph `en-news`, or the default public model `alchemy`. - """ - - def __init__(self, - sentiment=None, - emotion=None, - limit=None, - mentions=None, - mention_types=None, - sentence_locations=None, - model=None): - """ - Initialize a NluEnrichmentEntities object. - - :param bool sentiment: (optional) When `true`, sentiment analysis of entities will - be performed on the specified field. - :param bool emotion: (optional) When `true`, emotion detection of entities will be - performed on the specified field. - :param int limit: (optional) The maximum number of entities to extract for each - instance of the specified field. - :param bool mentions: (optional) When `true`, the number of mentions of each - identified entity is recorded. The default is `false`. - :param bool mention_types: (optional) When `true`, the types of mentions for each - idetifieid entity is recorded. The default is `false`. - :param bool sentence_locations: (optional) When `true`, a list of sentence - locations for each instance of each identified entity is recorded. The default is - `false`. - :param str model: (optional) The enrichement model to use with entity extraction. - May be a custom model provided by Watson Knowledge Studio, the public model for - use with Knowledge Graph `en-news`, or the default public model `alchemy`. - """ - self.sentiment = sentiment - self.emotion = emotion - self.limit = limit - self.mentions = mentions - self.mention_types = mention_types - self.sentence_locations = sentence_locations - self.model = model - - @classmethod - def _from_dict(cls, _dict): - """Initialize a NluEnrichmentEntities object from a json dictionary.""" - args = {} - if 'sentiment' in _dict: - args['sentiment'] = _dict.get('sentiment') - if 'emotion' in _dict: - args['emotion'] = _dict.get('emotion') - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - if 'mentions' in _dict: - args['mentions'] = _dict.get('mentions') - if 'mention_types' in _dict: - args['mention_types'] = _dict.get('mention_types') - if 'sentence_locations' in _dict: - args['sentence_locations'] = _dict.get('sentence_locations') - if 'model' in _dict: - args['model'] = _dict.get('model') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'sentiment') and self.sentiment is not None: - _dict['sentiment'] = self.sentiment - if hasattr(self, 'emotion') and self.emotion is not None: - _dict['emotion'] = self.emotion - if hasattr(self, 'limit') and self.limit is not None: - _dict['limit'] = self.limit - if hasattr(self, 'mentions') and self.mentions is not None: - _dict['mentions'] = self.mentions - if hasattr(self, 'mention_types') and self.mention_types is not None: - _dict['mention_types'] = self.mention_types - if hasattr( - self, - 'sentence_locations') and self.sentence_locations is not None: - _dict['sentence_locations'] = self.sentence_locations - if hasattr(self, 'model') and self.model is not None: - _dict['model'] = self.model - return _dict - - def __str__(self): - """Return a `str` version of this NluEnrichmentEntities object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, 
other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class NluEnrichmentFeatures(object): - """ - NluEnrichmentFeatures. - - :attr NluEnrichmentKeywords keywords: (optional) An object specifying the Keyword - enrichment and related parameters. - :attr NluEnrichmentEntities entities: (optional) An object speficying the Entities - enrichment and related parameters. - :attr NluEnrichmentSentiment sentiment: (optional) An object specifying the sentiment - extraction enrichment and related parameters. - :attr NluEnrichmentEmotion emotion: (optional) An object specifying the emotion - detection enrichment and related parameters. - :attr NluEnrichmentCategories categories: (optional) An object that indicates the - Categories enrichment will be applied to the specified field. - :attr NluEnrichmentSemanticRoles semantic_roles: (optional) An object specifiying the - semantic roles enrichment and related parameters. - :attr NluEnrichmentRelations relations: (optional) An object specifying the relations - enrichment and related parameters. - :attr NluEnrichmentConcepts concepts: (optional) An object specifiying the concepts - enrichment and related parameters. - """ - - def __init__(self, - keywords=None, - entities=None, - sentiment=None, - emotion=None, - categories=None, - semantic_roles=None, - relations=None, - concepts=None): - """ - Initialize a NluEnrichmentFeatures object. - - :param NluEnrichmentKeywords keywords: (optional) An object specifying the Keyword - enrichment and related parameters. - :param NluEnrichmentEntities entities: (optional) An object speficying the - Entities enrichment and related parameters. - :param NluEnrichmentSentiment sentiment: (optional) An object specifying the - sentiment extraction enrichment and related parameters. - :param NluEnrichmentEmotion emotion: (optional) An object specifying the emotion - detection enrichment and related parameters. - :param NluEnrichmentCategories categories: (optional) An object that indicates the - Categories enrichment will be applied to the specified field. - :param NluEnrichmentSemanticRoles semantic_roles: (optional) An object specifiying - the semantic roles enrichment and related parameters. - :param NluEnrichmentRelations relations: (optional) An object specifying the - relations enrichment and related parameters. - :param NluEnrichmentConcepts concepts: (optional) An object specifiying the - concepts enrichment and related parameters. 
- """ - self.keywords = keywords - self.entities = entities - self.sentiment = sentiment - self.emotion = emotion - self.categories = categories - self.semantic_roles = semantic_roles - self.relations = relations - self.concepts = concepts - - @classmethod - def _from_dict(cls, _dict): - """Initialize a NluEnrichmentFeatures object from a json dictionary.""" - args = {} - if 'keywords' in _dict: - args['keywords'] = NluEnrichmentKeywords._from_dict( - _dict.get('keywords')) - if 'entities' in _dict: - args['entities'] = NluEnrichmentEntities._from_dict( - _dict.get('entities')) - if 'sentiment' in _dict: - args['sentiment'] = NluEnrichmentSentiment._from_dict( - _dict.get('sentiment')) - if 'emotion' in _dict: - args['emotion'] = NluEnrichmentEmotion._from_dict( - _dict.get('emotion')) - if 'categories' in _dict: - args['categories'] = NluEnrichmentCategories._from_dict( - _dict.get('categories')) - if 'semantic_roles' in _dict: - args['semantic_roles'] = NluEnrichmentSemanticRoles._from_dict( - _dict.get('semantic_roles')) - if 'relations' in _dict: - args['relations'] = NluEnrichmentRelations._from_dict( - _dict.get('relations')) - if 'concepts' in _dict: - args['concepts'] = NluEnrichmentConcepts._from_dict( - _dict.get('concepts')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'keywords') and self.keywords is not None: - _dict['keywords'] = self.keywords._to_dict() - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = self.entities._to_dict() - if hasattr(self, 'sentiment') and self.sentiment is not None: - _dict['sentiment'] = self.sentiment._to_dict() - if hasattr(self, 'emotion') and self.emotion is not None: - _dict['emotion'] = self.emotion._to_dict() - if hasattr(self, 'categories') and self.categories is not None: - _dict['categories'] = self.categories._to_dict() - if hasattr(self, 'semantic_roles') and self.semantic_roles is not None: - _dict['semantic_roles'] = self.semantic_roles._to_dict() - if hasattr(self, 'relations') and self.relations is not None: - _dict['relations'] = self.relations._to_dict() - if hasattr(self, 'concepts') and self.concepts is not None: - _dict['concepts'] = self.concepts._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this NluEnrichmentFeatures object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class NluEnrichmentKeywords(object): - """ - An object specifying the Keyword enrichment and related parameters. - - :attr bool sentiment: (optional) When `true`, sentiment analysis of keywords will be - performed on the specified field. - :attr bool emotion: (optional) When `true`, emotion detection of keywords will be - performed on the specified field. - :attr int limit: (optional) The maximum number of keywords to extract for each - instance of the specified field. - """ - - def __init__(self, sentiment=None, emotion=None, limit=None): - """ - Initialize a NluEnrichmentKeywords object. - - :param bool sentiment: (optional) When `true`, sentiment analysis of keywords will - be performed on the specified field. 
- :param bool emotion: (optional) When `true`, emotion detection of keywords will be - performed on the specified field. - :param int limit: (optional) The maximum number of keywords to extract for each - instance of the specified field. - """ - self.sentiment = sentiment - self.emotion = emotion - self.limit = limit - - @classmethod - def _from_dict(cls, _dict): - """Initialize a NluEnrichmentKeywords object from a json dictionary.""" - args = {} - if 'sentiment' in _dict: - args['sentiment'] = _dict.get('sentiment') - if 'emotion' in _dict: - args['emotion'] = _dict.get('emotion') - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'sentiment') and self.sentiment is not None: - _dict['sentiment'] = self.sentiment - if hasattr(self, 'emotion') and self.emotion is not None: - _dict['emotion'] = self.emotion - if hasattr(self, 'limit') and self.limit is not None: - _dict['limit'] = self.limit - return _dict - - def __str__(self): - """Return a `str` version of this NluEnrichmentKeywords object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class NluEnrichmentRelations(object): - """ - An object specifying the relations enrichment and related parameters. - - :attr str model: (optional) *For use with `natural_language_understanding` enrichments - only.* The enrichement model to use with relationship extraction. May be a custom - model provided by Watson Knowledge Studio, the public model for use with Knowledge - Graph `en-news`, the default is`en-news`. - """ - - def __init__(self, model=None): - """ - Initialize a NluEnrichmentRelations object. - - :param str model: (optional) *For use with `natural_language_understanding` - enrichments only.* The enrichement model to use with relationship extraction. May - be a custom model provided by Watson Knowledge Studio, the public model for use - with Knowledge Graph `en-news`, the default is`en-news`. - """ - self.model = model - - @classmethod - def _from_dict(cls, _dict): - """Initialize a NluEnrichmentRelations object from a json dictionary.""" - args = {} - if 'model' in _dict: - args['model'] = _dict.get('model') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'model') and self.model is not None: - _dict['model'] = self.model - return _dict - - def __str__(self): - """Return a `str` version of this NluEnrichmentRelations object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class NluEnrichmentSemanticRoles(object): - """ - An object specifiying the semantic roles enrichment and related parameters. - - :attr bool entities: (optional) When `true`, entities are extracted from the - identified sentence parts. 
- :attr bool keywords: (optional) When `true`, keywords are extracted from the - identified sentence parts. - :attr int limit: (optional) The maximum number of semantic roles enrichments to extact - from each instance of the specified field. - """ - - def __init__(self, entities=None, keywords=None, limit=None): - """ - Initialize a NluEnrichmentSemanticRoles object. - - :param bool entities: (optional) When `true`, entities are extracted from the - identified sentence parts. - :param bool keywords: (optional) When `true`, keywords are extracted from the - identified sentence parts. - :param int limit: (optional) The maximum number of semantic roles enrichments to - extact from each instance of the specified field. - """ - self.entities = entities - self.keywords = keywords - self.limit = limit - - @classmethod - def _from_dict(cls, _dict): - """Initialize a NluEnrichmentSemanticRoles object from a json dictionary.""" - args = {} - if 'entities' in _dict: - args['entities'] = _dict.get('entities') - if 'keywords' in _dict: - args['keywords'] = _dict.get('keywords') - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = self.entities - if hasattr(self, 'keywords') and self.keywords is not None: - _dict['keywords'] = self.keywords - if hasattr(self, 'limit') and self.limit is not None: - _dict['limit'] = self.limit - return _dict - - def __str__(self): - """Return a `str` version of this NluEnrichmentSemanticRoles object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class NluEnrichmentSentiment(object): - """ - An object specifying the sentiment extraction enrichment and related parameters. - - :attr bool document: (optional) When `true`, sentiment analysis is performed on the - entire field. - :attr list[str] targets: (optional) A comma-separated list of target strings that will - have any associated sentiment analyzed. - """ - - def __init__(self, document=None, targets=None): - """ - Initialize a NluEnrichmentSentiment object. - - :param bool document: (optional) When `true`, sentiment analysis is performed on - the entire field. - :param list[str] targets: (optional) A comma-separated list of target strings that - will have any associated sentiment analyzed. 
- """ - self.document = document - self.targets = targets - - @classmethod - def _from_dict(cls, _dict): - """Initialize a NluEnrichmentSentiment object from a json dictionary.""" - args = {} - if 'document' in _dict: - args['document'] = _dict.get('document') - if 'targets' in _dict: - args['targets'] = _dict.get('targets') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document') and self.document is not None: - _dict['document'] = self.document - if hasattr(self, 'targets') and self.targets is not None: - _dict['targets'] = self.targets - return _dict - - def __str__(self): - """Return a `str` version of this NluEnrichmentSentiment object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class NormalizationOperation(object): - """ - NormalizationOperation. - - :attr str operation: (optional) Identifies what type of operation to perform. - **copy** - Copies the value of the **source_field** to the **destination_field** - field. If the **destination_field** already exists, then the value of the - **source_field** overwrites the original value of the **destination_field**. - **move** - Renames (moves) the **source_field** to the **destination_field**. If the - **destination_field** already exists, then the value of the **source_field** - overwrites the original value of the **destination_field**. Rename is identical to - copy, except that the **source_field** is removed after the value has been copied to - the **destination_field** (it is the same as a _copy_ followed by a _remove_). - **merge** - Merges the value of the **source_field** with the value of the - **destination_field**. The **destination_field** is converted into an array if it is - not already an array, and the value of the **source_field** is appended to the array. - This operation removes the **source_field** after the merge. If the **source_field** - does not exist in the current document, then the **destination_field** is still - converted into an array (if it is not an array already). This conversion ensures the - type for **destination_field** is consistent across all documents. - **remove** - Deletes the **source_field** field. The **destination_field** is ignored - for this operation. - **remove_nulls** - Removes all nested null (blank) field values from the ingested - document. **source_field** and **destination_field** are ignored by this operation - because _remove_nulls_ operates on the entire ingested document. Typically, - **remove_nulls** is invoked as the last normalization operation (if it is invoked at - all, it can be time-expensive). - :attr str source_field: (optional) The source field for the operation. - :attr str destination_field: (optional) The destination field for the operation. - """ - - def __init__(self, - operation=None, - source_field=None, - destination_field=None): - """ - Initialize a NormalizationOperation object. - - :param str operation: (optional) Identifies what type of operation to perform. - **copy** - Copies the value of the **source_field** to the **destination_field** - field. 
If the **destination_field** already exists, then the value of the - **source_field** overwrites the original value of the **destination_field**. - **move** - Renames (moves) the **source_field** to the **destination_field**. If - the **destination_field** already exists, then the value of the **source_field** - overwrites the original value of the **destination_field**. Rename is identical to - copy, except that the **source_field** is removed after the value has been copied - to the **destination_field** (it is the same as a _copy_ followed by a _remove_). - **merge** - Merges the value of the **source_field** with the value of the - **destination_field**. The **destination_field** is converted into an array if it - is not already an array, and the value of the **source_field** is appended to the - array. This operation removes the **source_field** after the merge. If the - **source_field** does not exist in the current document, then the - **destination_field** is still converted into an array (if it is not an array - already). This conversion ensures the type for **destination_field** is consistent - across all documents. - **remove** - Deletes the **source_field** field. The **destination_field** is - ignored for this operation. - **remove_nulls** - Removes all nested null (blank) field values from the ingested - document. **source_field** and **destination_field** are ignored by this operation - because _remove_nulls_ operates on the entire ingested document. Typically, - **remove_nulls** is invoked as the last normalization operation (if it is invoked - at all, it can be time-expensive). - :param str source_field: (optional) The source field for the operation. - :param str destination_field: (optional) The destination field for the operation. - """ - self.operation = operation - self.source_field = source_field - self.destination_field = destination_field - - @classmethod - def _from_dict(cls, _dict): - """Initialize a NormalizationOperation object from a json dictionary.""" - args = {} - if 'operation' in _dict: - args['operation'] = _dict.get('operation') - if 'source_field' in _dict: - args['source_field'] = _dict.get('source_field') - if 'destination_field' in _dict: - args['destination_field'] = _dict.get('destination_field') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'operation') and self.operation is not None: - _dict['operation'] = self.operation - if hasattr(self, 'source_field') and self.source_field is not None: - _dict['source_field'] = self.source_field - if hasattr(self, - 'destination_field') and self.destination_field is not None: - _dict['destination_field'] = self.destination_field - return _dict - - def __str__(self): - """Return a `str` version of this NormalizationOperation object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Notice(object): - """ - A notice produced for the collection. - - :attr str notice_id: (optional) Identifies the notice. Many notices might have the - same ID. This field exists so that user applications can programmatically identify a - notice and take automatic corrective action. 
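
The operation list above (copy, move, merge, remove, remove_nulls) is easiest to read as a pipeline of NormalizationOperation objects. A hypothetical sketch, again assuming the pre-refactor ibm_watson.discovery_v1 exports:

# Hypothetical sketch; assumes NormalizationOperation is importable from a
# pre-refactor ibm_watson.discovery_v1.
from ibm_watson.discovery_v1 import NormalizationOperation

normalizations = [
    # 'move' renames the field: the source is removed after its value is copied.
    NormalizationOperation(operation='move',
                           source_field='extracted_metadata.title',
                           destination_field='doc_title'),
    # 'remove_nulls' ignores source/destination and is typically run last.
    NormalizationOperation(operation='remove_nulls'),
]

print([op._to_dict() for op in normalizations])
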
Typical notice IDs include: - `index_failed`, `index_failed_too_many_requests`, `index_failed_incompatible_field`, - `index_failed_cluster_unavailable`, `ingestion_timeout`, `ingestion_error`, - `bad_request`, `internal_error`, `missing_model`, `unsupported_model`, - `smart_document_understanding_failed_incompatible_field`, - `smart_document_understanding_failed_internal_error`, - `smart_document_understanding_failed_internal_error`, - `smart_document_understanding_failed_warning`, - `smart_document_understanding_page_error`, - `smart_document_understanding_page_warning`. **Note:** This is not a complete list, - other values might be returned. - :attr datetime created: (optional) The creation date of the collection in the format - yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. - :attr str document_id: (optional) Unique identifier of the document. - :attr str query_id: (optional) Unique identifier of the query used for relevance - training. - :attr str severity: (optional) Severity level of the notice. - :attr str step: (optional) Ingestion or training step in which the notice occurred. - Typical step values include: `classify_elements`, `smartDocumentUnderstanding`, - `ingestion`, `indexing`, `convert`. **Note:** This is not a complete list, other - values might be returned. - :attr str description: (optional) The description of the notice. - """ - - def __init__(self, - notice_id=None, - created=None, - document_id=None, - query_id=None, - severity=None, - step=None, - description=None): - """ - Initialize a Notice object. - - :param str notice_id: (optional) Identifies the notice. Many notices might have - the same ID. This field exists so that user applications can programmatically - identify a notice and take automatic corrective action. Typical notice IDs - include: `index_failed`, `index_failed_too_many_requests`, - `index_failed_incompatible_field`, `index_failed_cluster_unavailable`, - `ingestion_timeout`, `ingestion_error`, `bad_request`, `internal_error`, - `missing_model`, `unsupported_model`, - `smart_document_understanding_failed_incompatible_field`, - `smart_document_understanding_failed_internal_error`, - `smart_document_understanding_failed_internal_error`, - `smart_document_understanding_failed_warning`, - `smart_document_understanding_page_error`, - `smart_document_understanding_page_warning`. **Note:** This is not a complete - list, other values might be returned. - :param datetime created: (optional) The creation date of the collection in the - format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. - :param str document_id: (optional) Unique identifier of the document. - :param str query_id: (optional) Unique identifier of the query used for relevance - training. - :param str severity: (optional) Severity level of the notice. - :param str step: (optional) Ingestion or training step in which the notice - occurred. Typical step values include: `classify_elements`, - `smartDocumentUnderstanding`, `ingestion`, `indexing`, `convert`. **Note:** This - is not a complete list, other values might be returned. - :param str description: (optional) The description of the notice. 
- """ - self.notice_id = notice_id - self.created = created - self.document_id = document_id - self.query_id = query_id - self.severity = severity - self.step = step - self.description = description - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Notice object from a json dictionary.""" - args = {} - if 'notice_id' in _dict: - args['notice_id'] = _dict.get('notice_id') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'document_id' in _dict: - args['document_id'] = _dict.get('document_id') - if 'query_id' in _dict: - args['query_id'] = _dict.get('query_id') - if 'severity' in _dict: - args['severity'] = _dict.get('severity') - if 'step' in _dict: - args['step'] = _dict.get('step') - if 'description' in _dict: - args['description'] = _dict.get('description') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'notice_id') and self.notice_id is not None: - _dict['notice_id'] = self.notice_id - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'document_id') and self.document_id is not None: - _dict['document_id'] = self.document_id - if hasattr(self, 'query_id') and self.query_id is not None: - _dict['query_id'] = self.query_id - if hasattr(self, 'severity') and self.severity is not None: - _dict['severity'] = self.severity - if hasattr(self, 'step') and self.step is not None: - _dict['step'] = self.step - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - return _dict - - def __str__(self): - """Return a `str` version of this Notice object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class PdfHeadingDetection(object): - """ - PdfHeadingDetection. - - :attr list[FontSetting] fonts: (optional) - """ - - def __init__(self, fonts=None): - """ - Initialize a PdfHeadingDetection object. - - :param list[FontSetting] fonts: (optional) - """ - self.fonts = fonts - - @classmethod - def _from_dict(cls, _dict): - """Initialize a PdfHeadingDetection object from a json dictionary.""" - args = {} - if 'fonts' in _dict: - args['fonts'] = [ - FontSetting._from_dict(x) for x in (_dict.get('fonts')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'fonts') and self.fonts is not None: - _dict['fonts'] = [x._to_dict() for x in self.fonts] - return _dict - - def __str__(self): - """Return a `str` version of this PdfHeadingDetection object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class PdfSettings(object): - """ - A list of PDF conversion settings. 
- - :attr PdfHeadingDetection heading: (optional) - """ - - def __init__(self, heading=None): - """ - Initialize a PdfSettings object. - - :param PdfHeadingDetection heading: (optional) - """ - self.heading = heading - - @classmethod - def _from_dict(cls, _dict): - """Initialize a PdfSettings object from a json dictionary.""" - args = {} - if 'heading' in _dict: - args['heading'] = PdfHeadingDetection._from_dict( - _dict.get('heading')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'heading') and self.heading is not None: - _dict['heading'] = self.heading._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this PdfSettings object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryAggregation(object): - """ - An aggregation produced by the Discovery service to analyze the input provided. - - :attr str type: (optional) The type of aggregation command used. For example: term, - filter, max, min, etc. - :attr list[AggregationResult] results: (optional) Array of aggregation results. - :attr int matching_results: (optional) Number of matching results. - :attr list[QueryAggregation] aggregations: (optional) Aggregations returned by the - Discovery service. - """ - - def __init__(self, - type=None, - results=None, - matching_results=None, - aggregations=None): - """ - Initialize a QueryAggregation object. - - :param str type: (optional) The type of aggregation command used. For example: - term, filter, max, min, etc. - :param list[AggregationResult] results: (optional) Array of aggregation results. - :param int matching_results: (optional) Number of matching results. - :param list[QueryAggregation] aggregations: (optional) Aggregations returned by - the Discovery service. 
- """ - self.type = type - self.results = results - self.matching_results = matching_results - self.aggregations = aggregations - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryAggregation object from a json dictionary.""" - args = {} - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'results' in _dict: - args['results'] = [ - AggregationResult._from_dict(x) for x in (_dict.get('results')) - ] - if 'matching_results' in _dict: - args['matching_results'] = _dict.get('matching_results') - if 'aggregations' in _dict: - args['aggregations'] = [ - QueryAggregation._from_dict(x) - for x in (_dict.get('aggregations')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'type') and self.type is not None: - _dict['type'] = self.type - if hasattr(self, 'results') and self.results is not None: - _dict['results'] = [x._to_dict() for x in self.results] - if hasattr(self, - 'matching_results') and self.matching_results is not None: - _dict['matching_results'] = self.matching_results - if hasattr(self, 'aggregations') and self.aggregations is not None: - _dict['aggregations'] = [x._to_dict() for x in self.aggregations] - return _dict - - def __str__(self): - """Return a `str` version of this QueryAggregation object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryEntitiesContext(object): - """ - Entity text to provide context for the queried entity and rank based on that - association. For example, if you wanted to query the city of London in England your - query would look for `London` with the context of `England`. - - :attr str text: (optional) Entity text to provide context for the queried entity and - rank based on that association. For example, if you wanted to query the city of London - in England your query would look for `London` with the context of `England`. - """ - - def __init__(self, text=None): - """ - Initialize a QueryEntitiesContext object. - - :param str text: (optional) Entity text to provide context for the queried entity - and rank based on that association. For example, if you wanted to query the city - of London in England your query would look for `London` with the context of - `England`. 
- """ - self.text = text - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryEntitiesContext object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - return _dict - - def __str__(self): - """Return a `str` version of this QueryEntitiesContext object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryEntitiesEntity(object): - """ - A text string that appears within the entity text field. - - :attr str text: (optional) Entity text content. - :attr str type: (optional) The type of the specified entity. - """ - - def __init__(self, text=None, type=None): - """ - Initialize a QueryEntitiesEntity object. - - :param str text: (optional) Entity text content. - :param str type: (optional) The type of the specified entity. - """ - self.text = text - self.type = type - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryEntitiesEntity object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'type' in _dict: - args['type'] = _dict.get('type') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'type') and self.type is not None: - _dict['type'] = self.type - return _dict - - def __str__(self): - """Return a `str` version of this QueryEntitiesEntity object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryEntitiesResponse(object): - """ - An object that contains an array of entities resulting from the query. - - :attr list[QueryEntitiesResponseItem] entities: (optional) Array of entities that - results from the query. - """ - - def __init__(self, entities=None): - """ - Initialize a QueryEntitiesResponse object. - - :param list[QueryEntitiesResponseItem] entities: (optional) Array of entities that - results from the query. 
- """ - self.entities = entities - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryEntitiesResponse object from a json dictionary.""" - args = {} - if 'entities' in _dict: - args['entities'] = [ - QueryEntitiesResponseItem._from_dict(x) - for x in (_dict.get('entities')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] - return _dict - - def __str__(self): - """Return a `str` version of this QueryEntitiesResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryEntitiesResponseItem(object): - """ - Object containing Entity query response information. - - :attr str text: (optional) Entity text content. - :attr str type: (optional) The type of the result entity. - :attr list[QueryEvidence] evidence: (optional) List of different evidentiary items to - support the result. - """ - - def __init__(self, text=None, type=None, evidence=None): - """ - Initialize a QueryEntitiesResponseItem object. - - :param str text: (optional) Entity text content. - :param str type: (optional) The type of the result entity. - :param list[QueryEvidence] evidence: (optional) List of different evidentiary - items to support the result. - """ - self.text = text - self.type = type - self.evidence = evidence - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryEntitiesResponseItem object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'evidence' in _dict: - args['evidence'] = [ - QueryEvidence._from_dict(x) for x in (_dict.get('evidence')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'type') and self.type is not None: - _dict['type'] = self.type - if hasattr(self, 'evidence') and self.evidence is not None: - _dict['evidence'] = [x._to_dict() for x in self.evidence] - return _dict - - def __str__(self): - """Return a `str` version of this QueryEntitiesResponseItem object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryEvidence(object): - """ - Description of evidence location supporting Knoweldge Graph query result. - - :attr str document_id: (optional) The docuemnt ID (as indexed in Discovery) of the - evidence location. - :attr str field: (optional) The field of the document where the supporting evidence - was identified. - :attr int start_offset: (optional) The start location of the evidence in the - identified field. This value is inclusive. 
- :attr int end_offset: (optional) The end location of the evidence in the identified - field. This value is inclusive. - :attr list[QueryEvidenceEntity] entities: (optional) An array of entity objects that - show evidence of the result. - """ - - def __init__(self, - document_id=None, - field=None, - start_offset=None, - end_offset=None, - entities=None): - """ - Initialize a QueryEvidence object. - - :param str document_id: (optional) The docuemnt ID (as indexed in Discovery) of - the evidence location. - :param str field: (optional) The field of the document where the supporting - evidence was identified. - :param int start_offset: (optional) The start location of the evidence in the - identified field. This value is inclusive. - :param int end_offset: (optional) The end location of the evidence in the - identified field. This value is inclusive. - :param list[QueryEvidenceEntity] entities: (optional) An array of entity objects - that show evidence of the result. - """ - self.document_id = document_id - self.field = field - self.start_offset = start_offset - self.end_offset = end_offset - self.entities = entities - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryEvidence object from a json dictionary.""" - args = {} - if 'document_id' in _dict: - args['document_id'] = _dict.get('document_id') - if 'field' in _dict: - args['field'] = _dict.get('field') - if 'start_offset' in _dict: - args['start_offset'] = _dict.get('start_offset') - if 'end_offset' in _dict: - args['end_offset'] = _dict.get('end_offset') - if 'entities' in _dict: - args['entities'] = [ - QueryEvidenceEntity._from_dict(x) - for x in (_dict.get('entities')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document_id') and self.document_id is not None: - _dict['document_id'] = self.document_id - if hasattr(self, 'field') and self.field is not None: - _dict['field'] = self.field - if hasattr(self, 'start_offset') and self.start_offset is not None: - _dict['start_offset'] = self.start_offset - if hasattr(self, 'end_offset') and self.end_offset is not None: - _dict['end_offset'] = self.end_offset - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] - return _dict - - def __str__(self): - """Return a `str` version of this QueryEvidence object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryEvidenceEntity(object): - """ - Entity description and location within evidence field. - - :attr str type: (optional) The entity type for this entity. Possible types vary based - on model used. - :attr str text: (optional) The original text of this entity as found in the evidence - field. - :attr int start_offset: (optional) The start location of the entity text in the - identified field. This value is inclusive. - :attr int end_offset: (optional) The end location of the entity text in the identified - field. This value is exclusive. - """ - - def __init__(self, type=None, text=None, start_offset=None, - end_offset=None): - """ - Initialize a QueryEvidenceEntity object. 
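# Illustrative sketch: per the attribute docs above, QueryEvidence offsets are both
# inclusive, while QueryEvidenceEntity has an inclusive start and an exclusive end.
# Slicing the originating field therefore differs by one between the two (the sample
# text and offsets here are made up):
field_text = 'The city of London lies in England.'

evidence = {'start_offset': 12, 'end_offset': 33}  # inclusive start, inclusive end
entity = {'start_offset': 12, 'end_offset': 18}    # inclusive start, exclusive end

print(field_text[evidence['start_offset']:evidence['end_offset'] + 1])  # London lies in England
print(field_text[entity['start_offset']:entity['end_offset']])          # London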
- - :param str type: (optional) The entity type for this entity. Possible types vary - based on model used. - :param str text: (optional) The original text of this entity as found in the - evidence field. - :param int start_offset: (optional) The start location of the entity text in the - identified field. This value is inclusive. - :param int end_offset: (optional) The end location of the entity text in the - identified field. This value is exclusive. - """ - self.type = type - self.text = text - self.start_offset = start_offset - self.end_offset = end_offset - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryEvidenceEntity object from a json dictionary.""" - args = {} - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'start_offset' in _dict: - args['start_offset'] = _dict.get('start_offset') - if 'end_offset' in _dict: - args['end_offset'] = _dict.get('end_offset') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'type') and self.type is not None: - _dict['type'] = self.type - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'start_offset') and self.start_offset is not None: - _dict['start_offset'] = self.start_offset - if hasattr(self, 'end_offset') and self.end_offset is not None: - _dict['end_offset'] = self.end_offset - return _dict - - def __str__(self): - """Return a `str` version of this QueryEvidenceEntity object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryFilterType(object): - """ - QueryFilterType. - - :attr list[str] exclude: (optional) A comma-separated list of types to exclude. - :attr list[str] include: (optional) A comma-separated list of types to include. All - other types are excluded. - """ - - def __init__(self, exclude=None, include=None): - """ - Initialize a QueryFilterType object. - - :param list[str] exclude: (optional) A comma-separated list of types to exclude. - :param list[str] include: (optional) A comma-separated list of types to include. - All other types are excluded. 
- """ - self.exclude = exclude - self.include = include - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryFilterType object from a json dictionary.""" - args = {} - if 'exclude' in _dict: - args['exclude'] = _dict.get('exclude') - if 'include' in _dict: - args['include'] = _dict.get('include') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'exclude') and self.exclude is not None: - _dict['exclude'] = self.exclude - if hasattr(self, 'include') and self.include is not None: - _dict['include'] = self.include - return _dict - - def __str__(self): - """Return a `str` version of this QueryFilterType object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryNoticesResponse(object): - """ - QueryNoticesResponse. - - :attr int matching_results: (optional) The number of matching results. - :attr list[QueryNoticesResult] results: (optional) Array of document results that - match the query. - :attr list[QueryAggregation] aggregations: (optional) Array of aggregation results - that match the query. - :attr list[QueryPassages] passages: (optional) Array of passage results that match the - query. - :attr int duplicates_removed: (optional) The number of duplicates removed from this - notices query. - """ - - def __init__(self, - matching_results=None, - results=None, - aggregations=None, - passages=None, - duplicates_removed=None): - """ - Initialize a QueryNoticesResponse object. - - :param int matching_results: (optional) The number of matching results. - :param list[QueryNoticesResult] results: (optional) Array of document results that - match the query. - :param list[QueryAggregation] aggregations: (optional) Array of aggregation - results that match the query. - :param list[QueryPassages] passages: (optional) Array of passage results that - match the query. - :param int duplicates_removed: (optional) The number of duplicates removed from - this notices query. 
- """ - self.matching_results = matching_results - self.results = results - self.aggregations = aggregations - self.passages = passages - self.duplicates_removed = duplicates_removed - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryNoticesResponse object from a json dictionary.""" - args = {} - if 'matching_results' in _dict: - args['matching_results'] = _dict.get('matching_results') - if 'results' in _dict: - args['results'] = [ - QueryNoticesResult._from_dict(x) for x in (_dict.get('results')) - ] - if 'aggregations' in _dict: - args['aggregations'] = [ - QueryAggregation._from_dict(x) - for x in (_dict.get('aggregations')) - ] - if 'passages' in _dict: - args['passages'] = [ - QueryPassages._from_dict(x) for x in (_dict.get('passages')) - ] - if 'duplicates_removed' in _dict: - args['duplicates_removed'] = _dict.get('duplicates_removed') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'matching_results') and self.matching_results is not None: - _dict['matching_results'] = self.matching_results - if hasattr(self, 'results') and self.results is not None: - _dict['results'] = [x._to_dict() for x in self.results] - if hasattr(self, 'aggregations') and self.aggregations is not None: - _dict['aggregations'] = [x._to_dict() for x in self.aggregations] - if hasattr(self, 'passages') and self.passages is not None: - _dict['passages'] = [x._to_dict() for x in self.passages] - if hasattr( - self, - 'duplicates_removed') and self.duplicates_removed is not None: - _dict['duplicates_removed'] = self.duplicates_removed - return _dict - - def __str__(self): - """Return a `str` version of this QueryNoticesResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryNoticesResult(object): - """ - QueryNoticesResult. - - :attr str id: (optional) The unique identifier of the document. - :attr dict metadata: (optional) Metadata of the document. - :attr str collection_id: (optional) The collection ID of the collection containing the - document for this result. - :attr QueryResultMetadata result_metadata: (optional) Metadata of a query result. - :attr str title: (optional) Automatically extracted result title. - :attr int code: (optional) The internal status code returned by the ingestion - subsystem indicating the overall result of ingesting the source document. - :attr str filename: (optional) Name of the original source file (if available). - :attr str file_type: (optional) The type of the original source file. - :attr str sha1: (optional) The SHA-1 hash of the original source file (formatted as a - hexadecimal string). - :attr list[Notice] notices: (optional) Array of notices for the document. - """ - - def __init__(self, - id=None, - metadata=None, - collection_id=None, - result_metadata=None, - title=None, - code=None, - filename=None, - file_type=None, - sha1=None, - notices=None, - **kwargs): - """ - Initialize a QueryNoticesResult object. - - :param str id: (optional) The unique identifier of the document. - :param dict metadata: (optional) Metadata of the document. 
- :param str collection_id: (optional) The collection ID of the collection - containing the document for this result. - :param QueryResultMetadata result_metadata: (optional) Metadata of a query result. - :param str title: (optional) Automatically extracted result title. - :param int code: (optional) The internal status code returned by the ingestion - subsystem indicating the overall result of ingesting the source document. - :param str filename: (optional) Name of the original source file (if available). - :param str file_type: (optional) The type of the original source file. - :param str sha1: (optional) The SHA-1 hash of the original source file (formatted - as a hexadecimal string). - :param list[Notice] notices: (optional) Array of notices for the document. - :param **kwargs: (optional) Any additional properties. - """ - self.id = id - self.metadata = metadata - self.collection_id = collection_id - self.result_metadata = result_metadata - self.title = title - self.code = code - self.filename = filename - self.file_type = file_type - self.sha1 = sha1 - self.notices = notices - for _key, _value in kwargs.items(): - setattr(self, _key, _value) - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryNoticesResult object from a json dictionary.""" - args = {} - xtra = _dict.copy() - if 'id' in _dict: - args['id'] = _dict.get('id') - del xtra['id'] - if 'metadata' in _dict: - args['metadata'] = _dict.get('metadata') - del xtra['metadata'] - if 'collection_id' in _dict: - args['collection_id'] = _dict.get('collection_id') - del xtra['collection_id'] - if 'result_metadata' in _dict: - args['result_metadata'] = QueryResultMetadata._from_dict( - _dict.get('result_metadata')) - del xtra['result_metadata'] - if 'title' in _dict: - args['title'] = _dict.get('title') - del xtra['title'] - if 'code' in _dict: - args['code'] = _dict.get('code') - del xtra['code'] - if 'filename' in _dict: - args['filename'] = _dict.get('filename') - del xtra['filename'] - if 'file_type' in _dict: - args['file_type'] = _dict.get('file_type') - del xtra['file_type'] - if 'sha1' in _dict: - args['sha1'] = _dict.get('sha1') - del xtra['sha1'] - if 'notices' in _dict: - args['notices'] = [ - Notice._from_dict(x) for x in (_dict.get('notices')) - ] - del xtra['notices'] - args.update(xtra) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'id') and self.id is not None: - _dict['id'] = self.id - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata - if hasattr(self, 'collection_id') and self.collection_id is not None: - _dict['collection_id'] = self.collection_id - if hasattr(self, - 'result_metadata') and self.result_metadata is not None: - _dict['result_metadata'] = self.result_metadata._to_dict() - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, 'code') and self.code is not None: - _dict['code'] = self.code - if hasattr(self, 'filename') and self.filename is not None: - _dict['filename'] = self.filename - if hasattr(self, 'file_type') and self.file_type is not None: - _dict['file_type'] = self.file_type - if hasattr(self, 'sha1') and self.sha1 is not None: - _dict['sha1'] = self.sha1 - if hasattr(self, 'notices') and self.notices is not None: - _dict['notices'] = [x._to_dict() for x in self.notices] - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if 
_value is not None: - _dict[_key] = _value - return _dict - - def __setattr__(self, name, value): - properties = { - 'id', 'metadata', 'collection_id', 'result_metadata', 'title', - 'code', 'filename', 'file_type', 'sha1', 'notices' - } - if not hasattr(self, '_additionalProperties'): - super(QueryNoticesResult, self).__setattr__('_additionalProperties', - set()) - if name not in properties: - self._additionalProperties.add(name) - super(QueryNoticesResult, self).__setattr__(name, value) - - def __str__(self): - """Return a `str` version of this QueryNoticesResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryPassages(object): - """ - QueryPassages. - - :attr str document_id: (optional) The unique identifier of the document from which the - passage has been extracted. - :attr float passage_score: (optional) The confidence score of the passages's analysis. - A higher score indicates greater confidence. - :attr str passage_text: (optional) The content of the extracted passage. - :attr int start_offset: (optional) The position of the first character of the - extracted passage in the originating field. - :attr int end_offset: (optional) The position of the last character of the extracted - passage in the originating field. - :attr str field: (optional) The label of the field from which the passage has been - extracted. - """ - - def __init__(self, - document_id=None, - passage_score=None, - passage_text=None, - start_offset=None, - end_offset=None, - field=None): - """ - Initialize a QueryPassages object. - - :param str document_id: (optional) The unique identifier of the document from - which the passage has been extracted. - :param float passage_score: (optional) The confidence score of the passages's - analysis. A higher score indicates greater confidence. - :param str passage_text: (optional) The content of the extracted passage. - :param int start_offset: (optional) The position of the first character of the - extracted passage in the originating field. - :param int end_offset: (optional) The position of the last character of the - extracted passage in the originating field. - :param str field: (optional) The label of the field from which the passage has - been extracted. 
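# Illustrative sketch (toy class; the names are not the SDK's): QueryNoticesResult above
# keeps any JSON keys it does not recognise -- _from_dict forwards them as extra keyword
# arguments and _to_dict writes them back out -- so custom result fields survive a round
# trip. The same pass-through idea in miniature:
class PassThroughResult(object):
    KNOWN = ('id', 'title')

    def __init__(self, id=None, title=None, **kwargs):
        self.id = id
        self.title = title
        self.extra = dict(kwargs)  # stand-in for the _additionalProperties bookkeeping

    @classmethod
    def _from_dict(cls, _dict):
        known = {k: v for k, v in _dict.items() if k in cls.KNOWN}
        extra = {k: v for k, v in _dict.items() if k not in cls.KNOWN}
        return cls(**known, **extra)

    def _to_dict(self):
        out = {k: getattr(self, k) for k in self.KNOWN if getattr(self, k) is not None}
        out.update(self.extra)
        return out


result = PassThroughResult._from_dict({'id': 'doc1', 'custom_score': 42})
print(result._to_dict())  # {'id': 'doc1', 'custom_score': 42}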
- """ - self.document_id = document_id - self.passage_score = passage_score - self.passage_text = passage_text - self.start_offset = start_offset - self.end_offset = end_offset - self.field = field - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryPassages object from a json dictionary.""" - args = {} - if 'document_id' in _dict: - args['document_id'] = _dict.get('document_id') - if 'passage_score' in _dict: - args['passage_score'] = _dict.get('passage_score') - if 'passage_text' in _dict: - args['passage_text'] = _dict.get('passage_text') - if 'start_offset' in _dict: - args['start_offset'] = _dict.get('start_offset') - if 'end_offset' in _dict: - args['end_offset'] = _dict.get('end_offset') - if 'field' in _dict: - args['field'] = _dict.get('field') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document_id') and self.document_id is not None: - _dict['document_id'] = self.document_id - if hasattr(self, 'passage_score') and self.passage_score is not None: - _dict['passage_score'] = self.passage_score - if hasattr(self, 'passage_text') and self.passage_text is not None: - _dict['passage_text'] = self.passage_text - if hasattr(self, 'start_offset') and self.start_offset is not None: - _dict['start_offset'] = self.start_offset - if hasattr(self, 'end_offset') and self.end_offset is not None: - _dict['end_offset'] = self.end_offset - if hasattr(self, 'field') and self.field is not None: - _dict['field'] = self.field - return _dict - - def __str__(self): - """Return a `str` version of this QueryPassages object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryRelationsArgument(object): - """ - QueryRelationsArgument. - - :attr list[QueryEntitiesEntity] entities: (optional) Array of query entities. - """ - - def __init__(self, entities=None): - """ - Initialize a QueryRelationsArgument object. - - :param list[QueryEntitiesEntity] entities: (optional) Array of query entities. - """ - self.entities = entities - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryRelationsArgument object from a json dictionary.""" - args = {} - if 'entities' in _dict: - args['entities'] = [ - QueryEntitiesEntity._from_dict(x) - for x in (_dict.get('entities')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] - return _dict - - def __str__(self): - """Return a `str` version of this QueryRelationsArgument object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryRelationsEntity(object): - """ - QueryRelationsEntity. - - :attr str text: (optional) Entity text content. 
- :attr str type: (optional) The type of the specified entity. - :attr bool exact: (optional) If false, implicit querying is performed. The default is - `false`. - """ - - def __init__(self, text=None, type=None, exact=None): - """ - Initialize a QueryRelationsEntity object. - - :param str text: (optional) Entity text content. - :param str type: (optional) The type of the specified entity. - :param bool exact: (optional) If false, implicit querying is performed. The - default is `false`. - """ - self.text = text - self.type = type - self.exact = exact - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryRelationsEntity object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'exact' in _dict: - args['exact'] = _dict.get('exact') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'type') and self.type is not None: - _dict['type'] = self.type - if hasattr(self, 'exact') and self.exact is not None: - _dict['exact'] = self.exact - return _dict - - def __str__(self): - """Return a `str` version of this QueryRelationsEntity object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryRelationsFilter(object): - """ - QueryRelationsFilter. - - :attr QueryFilterType relation_types: (optional) - :attr QueryFilterType entity_types: (optional) - :attr list[str] document_ids: (optional) A comma-separated list of document IDs to - include in the query. - """ - - def __init__(self, - relation_types=None, - entity_types=None, - document_ids=None): - """ - Initialize a QueryRelationsFilter object. - - :param QueryFilterType relation_types: (optional) - :param QueryFilterType entity_types: (optional) - :param list[str] document_ids: (optional) A comma-separated list of document IDs - to include in the query. 
- """ - self.relation_types = relation_types - self.entity_types = entity_types - self.document_ids = document_ids - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryRelationsFilter object from a json dictionary.""" - args = {} - if 'relation_types' in _dict: - args['relation_types'] = QueryFilterType._from_dict( - _dict.get('relation_types')) - if 'entity_types' in _dict: - args['entity_types'] = QueryFilterType._from_dict( - _dict.get('entity_types')) - if 'document_ids' in _dict: - args['document_ids'] = _dict.get('document_ids') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'relation_types') and self.relation_types is not None: - _dict['relation_types'] = self.relation_types._to_dict() - if hasattr(self, 'entity_types') and self.entity_types is not None: - _dict['entity_types'] = self.entity_types._to_dict() - if hasattr(self, 'document_ids') and self.document_ids is not None: - _dict['document_ids'] = self.document_ids - return _dict - - def __str__(self): - """Return a `str` version of this QueryRelationsFilter object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryRelationsRelationship(object): - """ - QueryRelationsRelationship. - - :attr str type: (optional) The identified relationship type. - :attr int frequency: (optional) The number of times the relationship is mentioned. - :attr list[QueryRelationsArgument] arguments: (optional) Information about the - relationship. - :attr list[QueryEvidence] evidence: (optional) List of different evidentiary items to - support the result. - """ - - def __init__(self, type=None, frequency=None, arguments=None, - evidence=None): - """ - Initialize a QueryRelationsRelationship object. - - :param str type: (optional) The identified relationship type. - :param int frequency: (optional) The number of times the relationship is - mentioned. - :param list[QueryRelationsArgument] arguments: (optional) Information about the - relationship. - :param list[QueryEvidence] evidence: (optional) List of different evidentiary - items to support the result. 
- """ - self.type = type - self.frequency = frequency - self.arguments = arguments - self.evidence = evidence - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryRelationsRelationship object from a json dictionary.""" - args = {} - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'frequency' in _dict: - args['frequency'] = _dict.get('frequency') - if 'arguments' in _dict: - args['arguments'] = [ - QueryRelationsArgument._from_dict(x) - for x in (_dict.get('arguments')) - ] - if 'evidence' in _dict: - args['evidence'] = [ - QueryEvidence._from_dict(x) for x in (_dict.get('evidence')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'type') and self.type is not None: - _dict['type'] = self.type - if hasattr(self, 'frequency') and self.frequency is not None: - _dict['frequency'] = self.frequency - if hasattr(self, 'arguments') and self.arguments is not None: - _dict['arguments'] = [x._to_dict() for x in self.arguments] - if hasattr(self, 'evidence') and self.evidence is not None: - _dict['evidence'] = [x._to_dict() for x in self.evidence] - return _dict - - def __str__(self): - """Return a `str` version of this QueryRelationsRelationship object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryRelationsResponse(object): - """ - QueryRelationsResponse. - - :attr list[QueryRelationsRelationship] relations: (optional) Array of relationships - for the relations query. - """ - - def __init__(self, relations=None): - """ - Initialize a QueryRelationsResponse object. - - :param list[QueryRelationsRelationship] relations: (optional) Array of - relationships for the relations query. - """ - self.relations = relations - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryRelationsResponse object from a json dictionary.""" - args = {} - if 'relations' in _dict: - args['relations'] = [ - QueryRelationsRelationship._from_dict(x) - for x in (_dict.get('relations')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'relations') and self.relations is not None: - _dict['relations'] = [x._to_dict() for x in self.relations] - return _dict - - def __str__(self): - """Return a `str` version of this QueryRelationsResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryResponse(object): - """ - A response containing the documents and aggregations for the query. - - :attr int matching_results: (optional) The number of matching results for the query. - :attr list[QueryResult] results: (optional) Array of document results for the query. - :attr list[QueryAggregation] aggregations: (optional) Array of aggregation results for - the query. 
- :attr list[QueryPassages] passages: (optional) Array of passage results for the query. - :attr int duplicates_removed: (optional) The number of duplicate results removed. - :attr str session_token: (optional) The session token for this query. The session - token can be used to add events associated with this query to the query and event log. - **Important:** Session tokens are case sensitive. - :attr RetrievalDetails retrieval_details: (optional) An object contain retrieval type - information. - """ - - def __init__(self, - matching_results=None, - results=None, - aggregations=None, - passages=None, - duplicates_removed=None, - session_token=None, - retrieval_details=None): - """ - Initialize a QueryResponse object. - - :param int matching_results: (optional) The number of matching results for the - query. - :param list[QueryResult] results: (optional) Array of document results for the - query. - :param list[QueryAggregation] aggregations: (optional) Array of aggregation - results for the query. - :param list[QueryPassages] passages: (optional) Array of passage results for the - query. - :param int duplicates_removed: (optional) The number of duplicate results removed. - :param str session_token: (optional) The session token for this query. The session - token can be used to add events associated with this query to the query and event - log. - **Important:** Session tokens are case sensitive. - :param RetrievalDetails retrieval_details: (optional) An object contain retrieval - type information. - """ - self.matching_results = matching_results - self.results = results - self.aggregations = aggregations - self.passages = passages - self.duplicates_removed = duplicates_removed - self.session_token = session_token - self.retrieval_details = retrieval_details - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryResponse object from a json dictionary.""" - args = {} - if 'matching_results' in _dict: - args['matching_results'] = _dict.get('matching_results') - if 'results' in _dict: - args['results'] = [ - QueryResult._from_dict(x) for x in (_dict.get('results')) - ] - if 'aggregations' in _dict: - args['aggregations'] = [ - QueryAggregation._from_dict(x) - for x in (_dict.get('aggregations')) - ] - if 'passages' in _dict: - args['passages'] = [ - QueryPassages._from_dict(x) for x in (_dict.get('passages')) - ] - if 'duplicates_removed' in _dict: - args['duplicates_removed'] = _dict.get('duplicates_removed') - if 'session_token' in _dict: - args['session_token'] = _dict.get('session_token') - if 'retrieval_details' in _dict: - args['retrieval_details'] = RetrievalDetails._from_dict( - _dict.get('retrieval_details')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'matching_results') and self.matching_results is not None: - _dict['matching_results'] = self.matching_results - if hasattr(self, 'results') and self.results is not None: - _dict['results'] = [x._to_dict() for x in self.results] - if hasattr(self, 'aggregations') and self.aggregations is not None: - _dict['aggregations'] = [x._to_dict() for x in self.aggregations] - if hasattr(self, 'passages') and self.passages is not None: - _dict['passages'] = [x._to_dict() for x in self.passages] - if hasattr( - self, - 'duplicates_removed') and self.duplicates_removed is not None: - _dict['duplicates_removed'] = self.duplicates_removed - if hasattr(self, 'session_token') and self.session_token is not None: - _dict['session_token'] = 
self.session_token - if hasattr(self, - 'retrieval_details') and self.retrieval_details is not None: - _dict['retrieval_details'] = self.retrieval_details._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this QueryResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryResult(object): - """ - QueryResult. - - :attr str id: (optional) The unique identifier of the document. - :attr dict metadata: (optional) Metadata of the document. - :attr str collection_id: (optional) The collection ID of the collection containing the - document for this result. - :attr QueryResultMetadata result_metadata: (optional) Metadata of a query result. - :attr str title: (optional) Automatically extracted result title. - """ - - def __init__(self, - id=None, - metadata=None, - collection_id=None, - result_metadata=None, - title=None, - **kwargs): - """ - Initialize a QueryResult object. - - :param str id: (optional) The unique identifier of the document. - :param dict metadata: (optional) Metadata of the document. - :param str collection_id: (optional) The collection ID of the collection - containing the document for this result. - :param QueryResultMetadata result_metadata: (optional) Metadata of a query result. - :param str title: (optional) Automatically extracted result title. - :param **kwargs: (optional) Any additional properties. - """ - self.id = id - self.metadata = metadata - self.collection_id = collection_id - self.result_metadata = result_metadata - self.title = title - for _key, _value in kwargs.items(): - setattr(self, _key, _value) - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryResult object from a json dictionary.""" - args = {} - xtra = _dict.copy() - if 'id' in _dict: - args['id'] = _dict.get('id') - del xtra['id'] - if 'metadata' in _dict: - args['metadata'] = _dict.get('metadata') - del xtra['metadata'] - if 'collection_id' in _dict: - args['collection_id'] = _dict.get('collection_id') - del xtra['collection_id'] - if 'result_metadata' in _dict: - args['result_metadata'] = QueryResultMetadata._from_dict( - _dict.get('result_metadata')) - del xtra['result_metadata'] - if 'title' in _dict: - args['title'] = _dict.get('title') - del xtra['title'] - args.update(xtra) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'id') and self.id is not None: - _dict['id'] = self.id - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata - if hasattr(self, 'collection_id') and self.collection_id is not None: - _dict['collection_id'] = self.collection_id - if hasattr(self, - 'result_metadata') and self.result_metadata is not None: - _dict['result_metadata'] = self.result_metadata._to_dict() - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value - return _dict - - def __setattr__(self, name, value): - properties = { - 'id', 'metadata', 'collection_id', 'result_metadata', 'title' - } - 
if not hasattr(self, '_additionalProperties'): - super(QueryResult, self).__setattr__('_additionalProperties', set()) - if name not in properties: - self._additionalProperties.add(name) - super(QueryResult, self).__setattr__(name, value) - - def __str__(self): - """Return a `str` version of this QueryResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class QueryResultMetadata(object): - """ - Metadata of a query result. - - :attr float score: An unbounded measure of the relevance of a particular result, - dependent on the query and matching document. A higher score indicates a greater match - to the query parameters. - :attr float confidence: (optional) The confidence score for the given result. - Calculated based on how relevant the result is estimated to be. confidence can range - from `0.0` to `1.0`. The higher the number, the more relevant the document. The - `confidence` value for a result was calculated using the model specified in the - `document_retrieval_strategy` field of the result set. - """ - - def __init__(self, score, confidence=None): - """ - Initialize a QueryResultMetadata object. - - :param float score: An unbounded measure of the relevance of a particular result, - dependent on the query and matching document. A higher score indicates a greater - match to the query parameters. - :param float confidence: (optional) The confidence score for the given result. - Calculated based on how relevant the result is estimated to be. confidence can - range from `0.0` to `1.0`. The higher the number, the more relevant the document. - The `confidence` value for a result was calculated using the model specified in - the `document_retrieval_strategy` field of the result set. - """ - self.score = score - self.confidence = confidence - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryResultMetadata object from a json dictionary.""" - args = {} - if 'score' in _dict: - args['score'] = _dict.get('score') - else: - raise ValueError( - 'Required property \'score\' not present in QueryResultMetadata JSON' - ) - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'score') and self.score is not None: - _dict['score'] = self.score - if hasattr(self, 'confidence') and self.confidence is not None: - _dict['confidence'] = self.confidence - return _dict - - def __str__(self): - """Return a `str` version of this QueryResultMetadata object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class RetrievalDetails(object): - """ - An object contain retrieval type information. - - :attr str document_retrieval_strategy: (optional) Indentifies the document retrieval - strategy used for this query. 
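# Illustrative sketch: QueryResultMetadata above distinguishes the unbounded relevance
# score from confidence, which ranges 0.0-1.0. A hypothetical filter that keeps results
# whose confidence meets a caller-chosen threshold:
def confident_results(results, threshold=0.5):
    """Keep result dicts whose result_metadata confidence is at least `threshold`."""
    kept = []
    for result in results:
        metadata = result.get('result_metadata') or {}
        confidence = metadata.get('confidence')
        if confidence is not None and confidence >= threshold:
            kept.append(result)
    return kept


sample = [
    {'id': 'doc-a', 'result_metadata': {'score': 8.3, 'confidence': 0.91}},
    {'id': 'doc-b', 'result_metadata': {'score': 2.0, 'confidence': 0.12}},
]
print([r['id'] for r in confident_results(sample)])  # ['doc-a']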
`relevancy_training` indicates that the results were - returned using a relevancy trained model. `continuous_relevancy_training` indicates - that the results were returned using the continuous relevancy training model created - by result feedback analysis. `untrained` means the results were returned using the - standard untrained model. - **Note**: In the event of trained collections being queried, but the trained model is - not used to return results, the **document_retrieval_strategy** will be listed as - `untrained`. - """ - - def __init__(self, document_retrieval_strategy=None): - """ - Initialize a RetrievalDetails object. - - :param str document_retrieval_strategy: (optional) Indentifies the document - retrieval strategy used for this query. `relevancy_training` indicates that the - results were returned using a relevancy trained model. - `continuous_relevancy_training` indicates that the results were returned using the - continuous relevancy training model created by result feedback analysis. - `untrained` means the results were returned using the standard untrained model. - **Note**: In the event of trained collections being queried, but the trained - model is not used to return results, the **document_retrieval_strategy** will be - listed as `untrained`. - """ - self.document_retrieval_strategy = document_retrieval_strategy - - @classmethod - def _from_dict(cls, _dict): - """Initialize a RetrievalDetails object from a json dictionary.""" - args = {} - if 'document_retrieval_strategy' in _dict: - args['document_retrieval_strategy'] = _dict.get( - 'document_retrieval_strategy') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document_retrieval_strategy' - ) and self.document_retrieval_strategy is not None: - _dict[ - 'document_retrieval_strategy'] = self.document_retrieval_strategy - return _dict - - def __str__(self): - """Return a `str` version of this RetrievalDetails object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SearchStatus(object): - """ - Information about the Continuous Relevancy Training for this environment. - - :attr str scope: (optional) Current scope of the training. Always returned as - `environment`. - :attr str status: (optional) The current status of Continuous Relevancy Training for - this environment. - :attr str status_description: (optional) Long description of the current Continuous - Relevancy Training status. - :attr date last_trained: (optional) The date stamp of the most recent completed - training for this environment. - """ - - def __init__(self, - scope=None, - status=None, - status_description=None, - last_trained=None): - """ - Initialize a SearchStatus object. - - :param str scope: (optional) Current scope of the training. Always returned as - `environment`. - :param str status: (optional) The current status of Continuous Relevancy Training - for this environment. - :param str status_description: (optional) Long description of the current - Continuous Relevancy Training status. - :param date last_trained: (optional) The date stamp of the most recent completed - training for this environment. 
- """ - self.scope = scope - self.status = status - self.status_description = status_description - self.last_trained = last_trained - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SearchStatus object from a json dictionary.""" - args = {} - if 'scope' in _dict: - args['scope'] = _dict.get('scope') - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'status_description' in _dict: - args['status_description'] = _dict.get('status_description') - if 'last_trained' in _dict: - args['last_trained'] = _dict.get('last_trained') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'scope') and self.scope is not None: - _dict['scope'] = self.scope - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr( - self, - 'status_description') and self.status_description is not None: - _dict['status_description'] = self.status_description - if hasattr(self, 'last_trained') and self.last_trained is not None: - _dict['last_trained'] = self.last_trained - return _dict - - def __str__(self): - """Return a `str` version of this SearchStatus object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SegmentSettings(object): - """ - A list of Document Segmentation settings. - - :attr bool enabled: (optional) Enables/disables the Document Segmentation feature. - :attr list[str] selector_tags: (optional) Defines the heading level that splits into - document segments. Valid values are h1, h2, h3, h4, h5, h6. The content of the header - field that the segmentation splits at is used as the **title** field for that - segmented result. - """ - - def __init__(self, enabled=None, selector_tags=None): - """ - Initialize a SegmentSettings object. - - :param bool enabled: (optional) Enables/disables the Document Segmentation - feature. - :param list[str] selector_tags: (optional) Defines the heading level that splits - into document segments. Valid values are h1, h2, h3, h4, h5, h6. The content of - the header field that the segmentation splits at is used as the **title** field - for that segmented result. 
- """ - self.enabled = enabled - self.selector_tags = selector_tags - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SegmentSettings object from a json dictionary.""" - args = {} - if 'enabled' in _dict: - args['enabled'] = _dict.get('enabled') - if 'selector_tags' in _dict: - args['selector_tags'] = _dict.get('selector_tags') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'enabled') and self.enabled is not None: - _dict['enabled'] = self.enabled - if hasattr(self, 'selector_tags') and self.selector_tags is not None: - _dict['selector_tags'] = self.selector_tags - return _dict - - def __str__(self): - """Return a `str` version of this SegmentSettings object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Source(object): - """ - Object containing source parameters for the configuration. - - :attr str type: (optional) The type of source to connect to. - - `box` indicates the configuration is to connect an instance of Enterprise Box. - - `salesforce` indicates the configuration is to connect to Salesforce. - - `sharepoint` indicates the configuration is to connect to Microsoft SharePoint - Online. - - `web_crawl` indicates the configuration is to perform a web page crawl. - - `cloud_object_storage` indicates the configuration is to connect to a cloud object - store. - :attr str credential_id: (optional) The **credential_id** of the credentials to use to - connect to the source. Credentials are defined using the **credentials** method. The - **source_type** of the credentials used must match the **type** field specified in - this object. - :attr SourceSchedule schedule: (optional) Object containing the schedule information - for the source. - :attr SourceOptions options: (optional) The **options** object defines which items to - crawl from the source system. - """ - - def __init__(self, - type=None, - credential_id=None, - schedule=None, - options=None): - """ - Initialize a Source object. - - :param str type: (optional) The type of source to connect to. - - `box` indicates the configuration is to connect an instance of Enterprise Box. - - `salesforce` indicates the configuration is to connect to Salesforce. - - `sharepoint` indicates the configuration is to connect to Microsoft SharePoint - Online. - - `web_crawl` indicates the configuration is to perform a web page crawl. - - `cloud_object_storage` indicates the configuration is to connect to a cloud - object store. - :param str credential_id: (optional) The **credential_id** of the credentials to - use to connect to the source. Credentials are defined using the **credentials** - method. The **source_type** of the credentials used must match the **type** field - specified in this object. - :param SourceSchedule schedule: (optional) Object containing the schedule - information for the source. - :param SourceOptions options: (optional) The **options** object defines which - items to crawl from the source system. 
- """ - self.type = type - self.credential_id = credential_id - self.schedule = schedule - self.options = options - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Source object from a json dictionary.""" - args = {} - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'credential_id' in _dict: - args['credential_id'] = _dict.get('credential_id') - if 'schedule' in _dict: - args['schedule'] = SourceSchedule._from_dict(_dict.get('schedule')) - if 'options' in _dict: - args['options'] = SourceOptions._from_dict(_dict.get('options')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'type') and self.type is not None: - _dict['type'] = self.type - if hasattr(self, 'credential_id') and self.credential_id is not None: - _dict['credential_id'] = self.credential_id - if hasattr(self, 'schedule') and self.schedule is not None: - _dict['schedule'] = self.schedule._to_dict() - if hasattr(self, 'options') and self.options is not None: - _dict['options'] = self.options._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this Source object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SourceOptions(object): - """ - The **options** object defines which items to crawl from the source system. - - :attr list[SourceOptionsFolder] folders: (optional) Array of folders to crawl from the - Box source. Only valid, and required, when the **type** field of the **source** object - is set to `box`. - :attr list[SourceOptionsObject] objects: (optional) Array of Salesforce document - object types to crawl from the Salesforce source. Only valid, and required, when the - **type** field of the **source** object is set to `salesforce`. - :attr list[SourceOptionsSiteColl] site_collections: (optional) Array of Microsoft - SharePointoint Online site collections to crawl from the SharePoint source. Only valid - and required when the **type** field of the **source** object is set to `sharepoint`. - :attr list[SourceOptionsWebCrawl] urls: (optional) Array of Web page URLs to begin - crawling the web from. Only valid and required when the **type** field of the - **source** object is set to `web_crawl`. - :attr list[SourceOptionsBuckets] buckets: (optional) Array of cloud object store - buckets to begin crawling. Only valid and required when the **type** field of the - **source** object is set to `cloud_object_store`, and the **crawl_all_buckets** field - is `false` or not specified. - :attr bool crawl_all_buckets: (optional) When `true`, all buckets in the specified - cloud object store are crawled. If set to `true`, the **buckets** array must not be - specified. - """ - - def __init__(self, - folders=None, - objects=None, - site_collections=None, - urls=None, - buckets=None, - crawl_all_buckets=None): - """ - Initialize a SourceOptions object. - - :param list[SourceOptionsFolder] folders: (optional) Array of folders to crawl - from the Box source. Only valid, and required, when the **type** field of the - **source** object is set to `box`. 
- :param list[SourceOptionsObject] objects: (optional) Array of Salesforce document - object types to crawl from the Salesforce source. Only valid, and required, when - the **type** field of the **source** object is set to `salesforce`. - :param list[SourceOptionsSiteColl] site_collections: (optional) Array of Microsoft - SharePointoint Online site collections to crawl from the SharePoint source. Only - valid and required when the **type** field of the **source** object is set to - `sharepoint`. - :param list[SourceOptionsWebCrawl] urls: (optional) Array of Web page URLs to - begin crawling the web from. Only valid and required when the **type** field of - the **source** object is set to `web_crawl`. - :param list[SourceOptionsBuckets] buckets: (optional) Array of cloud object store - buckets to begin crawling. Only valid and required when the **type** field of the - **source** object is set to `cloud_object_store`, and the **crawl_all_buckets** - field is `false` or not specified. - :param bool crawl_all_buckets: (optional) When `true`, all buckets in the - specified cloud object store are crawled. If set to `true`, the **buckets** array - must not be specified. - """ - self.folders = folders - self.objects = objects - self.site_collections = site_collections - self.urls = urls - self.buckets = buckets - self.crawl_all_buckets = crawl_all_buckets - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SourceOptions object from a json dictionary.""" - args = {} - if 'folders' in _dict: - args['folders'] = [ - SourceOptionsFolder._from_dict(x) - for x in (_dict.get('folders')) - ] - if 'objects' in _dict: - args['objects'] = [ - SourceOptionsObject._from_dict(x) - for x in (_dict.get('objects')) - ] - if 'site_collections' in _dict: - args['site_collections'] = [ - SourceOptionsSiteColl._from_dict(x) - for x in (_dict.get('site_collections')) - ] - if 'urls' in _dict: - args['urls'] = [ - SourceOptionsWebCrawl._from_dict(x) for x in (_dict.get('urls')) - ] - if 'buckets' in _dict: - args['buckets'] = [ - SourceOptionsBuckets._from_dict(x) - for x in (_dict.get('buckets')) - ] - if 'crawl_all_buckets' in _dict: - args['crawl_all_buckets'] = _dict.get('crawl_all_buckets') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'folders') and self.folders is not None: - _dict['folders'] = [x._to_dict() for x in self.folders] - if hasattr(self, 'objects') and self.objects is not None: - _dict['objects'] = [x._to_dict() for x in self.objects] - if hasattr(self, - 'site_collections') and self.site_collections is not None: - _dict['site_collections'] = [ - x._to_dict() for x in self.site_collections - ] - if hasattr(self, 'urls') and self.urls is not None: - _dict['urls'] = [x._to_dict() for x in self.urls] - if hasattr(self, 'buckets') and self.buckets is not None: - _dict['buckets'] = [x._to_dict() for x in self.buckets] - if hasattr(self, - 'crawl_all_buckets') and self.crawl_all_buckets is not None: - _dict['crawl_all_buckets'] = self.crawl_all_buckets - return _dict - - def __str__(self): - """Return a `str` version of this SourceOptions object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not 
self == other - - -class SourceOptionsBuckets(object): - """ - Object defining a cloud object store bucket to crawl. - - :attr str name: The name of the cloud object store bucket to crawl. - :attr int limit: (optional) The number of documents to crawl from this cloud object - store bucket. If not specified, all documents in the bucket are crawled. - """ - - def __init__(self, name, limit=None): - """ - Initialize a SourceOptionsBuckets object. - - :param str name: The name of the cloud object store bucket to crawl. - :param int limit: (optional) The number of documents to crawl from this cloud - object store bucket. If not specified, all documents in the bucket are crawled. - """ - self.name = name - self.limit = limit - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SourceOptionsBuckets object from a json dictionary.""" - args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in SourceOptionsBuckets JSON' - ) - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'limit') and self.limit is not None: - _dict['limit'] = self.limit - return _dict - - def __str__(self): - """Return a `str` version of this SourceOptionsBuckets object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SourceOptionsFolder(object): - """ - Object that defines a box folder to crawl with this configuration. - - :attr str owner_user_id: The Box user ID of the user who owns the folder to crawl. - :attr str folder_id: The Box folder ID of the folder to crawl. - :attr int limit: (optional) The maximum number of documents to crawl for this folder. - By default, all documents in the folder are crawled. - """ - - def __init__(self, owner_user_id, folder_id, limit=None): - """ - Initialize a SourceOptionsFolder object. - - :param str owner_user_id: The Box user ID of the user who owns the folder to - crawl. - :param str folder_id: The Box folder ID of the folder to crawl. - :param int limit: (optional) The maximum number of documents to crawl for this - folder. By default, all documents in the folder are crawled. 
- """ - self.owner_user_id = owner_user_id - self.folder_id = folder_id - self.limit = limit - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SourceOptionsFolder object from a json dictionary.""" - args = {} - if 'owner_user_id' in _dict: - args['owner_user_id'] = _dict.get('owner_user_id') - else: - raise ValueError( - 'Required property \'owner_user_id\' not present in SourceOptionsFolder JSON' - ) - if 'folder_id' in _dict: - args['folder_id'] = _dict.get('folder_id') - else: - raise ValueError( - 'Required property \'folder_id\' not present in SourceOptionsFolder JSON' - ) - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'owner_user_id') and self.owner_user_id is not None: - _dict['owner_user_id'] = self.owner_user_id - if hasattr(self, 'folder_id') and self.folder_id is not None: - _dict['folder_id'] = self.folder_id - if hasattr(self, 'limit') and self.limit is not None: - _dict['limit'] = self.limit - return _dict - - def __str__(self): - """Return a `str` version of this SourceOptionsFolder object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SourceOptionsObject(object): - """ - Object that defines a Salesforce document object type crawl with this configuration. - - :attr str name: The name of the Salesforce document object to crawl. For example, - `case`. - :attr int limit: (optional) The maximum number of documents to crawl for this document - object. By default, all documents in the document object are crawled. - """ - - def __init__(self, name, limit=None): - """ - Initialize a SourceOptionsObject object. - - :param str name: The name of the Salesforce document object to crawl. For example, - `case`. - :param int limit: (optional) The maximum number of documents to crawl for this - document object. By default, all documents in the document object are crawled. 
- """ - self.name = name - self.limit = limit - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SourceOptionsObject object from a json dictionary.""" - args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in SourceOptionsObject JSON' - ) - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'limit') and self.limit is not None: - _dict['limit'] = self.limit - return _dict - - def __str__(self): - """Return a `str` version of this SourceOptionsObject object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SourceOptionsSiteColl(object): - """ - Object that defines a Microsoft SharePoint site collection to crawl with this - configuration. - - :attr str site_collection_path: The Microsoft SharePoint Online site collection path - to crawl. The path must be relative to the **organization_url** that was specified - in the credentials associated with this source configuration. - :attr int limit: (optional) The maximum number of documents to crawl for this site - collection. By default, all documents in the site collection are crawled. - """ - - def __init__(self, site_collection_path, limit=None): - """ - Initialize a SourceOptionsSiteColl object. - - :param str site_collection_path: The Microsoft SharePoint Online site collection - path to crawl. The path must be relative to the **organization_url** that was - specified in the credentials associated with this source configuration. - :param int limit: (optional) The maximum number of documents to crawl for this - site collection. By default, all documents in the site collection are crawled. 
- """ - self.site_collection_path = site_collection_path - self.limit = limit - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SourceOptionsSiteColl object from a json dictionary.""" - args = {} - if 'site_collection_path' in _dict: - args['site_collection_path'] = _dict.get('site_collection_path') - else: - raise ValueError( - 'Required property \'site_collection_path\' not present in SourceOptionsSiteColl JSON' - ) - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'site_collection_path' - ) and self.site_collection_path is not None: - _dict['site_collection_path'] = self.site_collection_path - if hasattr(self, 'limit') and self.limit is not None: - _dict['limit'] = self.limit - return _dict - - def __str__(self): - """Return a `str` version of this SourceOptionsSiteColl object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SourceOptionsWebCrawl(object): - """ - Object defining which URL to crawl and how to crawl it. - - :attr str url: The starting URL to crawl. - :attr bool limit_to_starting_hosts: (optional) When `true`, crawls of the specified - URL are limited to the host part of the **url** field. - :attr str crawl_speed: (optional) The number of concurrent URLs to fetch. `gentle` - means one URL is fetched at a time with a delay between each call. `normal` means as - many as two URLs are fectched concurrently with a short delay between fetch calls. - `aggressive` means that up to ten URLs are fetched concurrently with a short delay - between fetch calls. - :attr bool allow_untrusted_certificate: (optional) When `true`, allows the crawl to - interact with HTTPS sites with SSL certificates with untrusted signers. - :attr int maximum_hops: (optional) The maximum number of hops to make from the initial - URL. When a page is crawled each link on that page will also be crawled if it is - within the **maximum_hops** from the initial URL. The first page crawled is 0 hops, - each link crawled from the first page is 1 hop, each link crawled from those pages is - 2 hops, and so on. - :attr int request_timeout: (optional) The maximum milliseconds to wait for a response - from the web server. - :attr bool override_robots_txt: (optional) When `true`, the crawler will ignore any - `robots.txt` encountered by the crawler. This should only ever be done when crawling a - web site the user owns. This must be be set to `true` when a **gateway_id** is specied - in the **credentials**. - """ - - def __init__(self, - url, - limit_to_starting_hosts=None, - crawl_speed=None, - allow_untrusted_certificate=None, - maximum_hops=None, - request_timeout=None, - override_robots_txt=None): - """ - Initialize a SourceOptionsWebCrawl object. - - :param str url: The starting URL to crawl. - :param bool limit_to_starting_hosts: (optional) When `true`, crawls of the - specified URL are limited to the host part of the **url** field. - :param str crawl_speed: (optional) The number of concurrent URLs to fetch. - `gentle` means one URL is fetched at a time with a delay between each call. 
- `normal` means as many as two URLs are fectched concurrently with a short delay - between fetch calls. `aggressive` means that up to ten URLs are fetched - concurrently with a short delay between fetch calls. - :param bool allow_untrusted_certificate: (optional) When `true`, allows the crawl - to interact with HTTPS sites with SSL certificates with untrusted signers. - :param int maximum_hops: (optional) The maximum number of hops to make from the - initial URL. When a page is crawled each link on that page will also be crawled if - it is within the **maximum_hops** from the initial URL. The first page crawled is - 0 hops, each link crawled from the first page is 1 hop, each link crawled from - those pages is 2 hops, and so on. - :param int request_timeout: (optional) The maximum milliseconds to wait for a - response from the web server. - :param bool override_robots_txt: (optional) When `true`, the crawler will ignore - any `robots.txt` encountered by the crawler. This should only ever be done when - crawling a web site the user owns. This must be be set to `true` when a - **gateway_id** is specied in the **credentials**. - """ - self.url = url - self.limit_to_starting_hosts = limit_to_starting_hosts - self.crawl_speed = crawl_speed - self.allow_untrusted_certificate = allow_untrusted_certificate - self.maximum_hops = maximum_hops - self.request_timeout = request_timeout - self.override_robots_txt = override_robots_txt - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SourceOptionsWebCrawl object from a json dictionary.""" - args = {} - if 'url' in _dict: - args['url'] = _dict.get('url') - else: - raise ValueError( - 'Required property \'url\' not present in SourceOptionsWebCrawl JSON' - ) - if 'limit_to_starting_hosts' in _dict: - args['limit_to_starting_hosts'] = _dict.get( - 'limit_to_starting_hosts') - if 'crawl_speed' in _dict: - args['crawl_speed'] = _dict.get('crawl_speed') - if 'allow_untrusted_certificate' in _dict: - args['allow_untrusted_certificate'] = _dict.get( - 'allow_untrusted_certificate') - if 'maximum_hops' in _dict: - args['maximum_hops'] = _dict.get('maximum_hops') - if 'request_timeout' in _dict: - args['request_timeout'] = _dict.get('request_timeout') - if 'override_robots_txt' in _dict: - args['override_robots_txt'] = _dict.get('override_robots_txt') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'url') and self.url is not None: - _dict['url'] = self.url - if hasattr(self, 'limit_to_starting_hosts' - ) and self.limit_to_starting_hosts is not None: - _dict['limit_to_starting_hosts'] = self.limit_to_starting_hosts - if hasattr(self, 'crawl_speed') and self.crawl_speed is not None: - _dict['crawl_speed'] = self.crawl_speed - if hasattr(self, 'allow_untrusted_certificate' - ) and self.allow_untrusted_certificate is not None: - _dict[ - 'allow_untrusted_certificate'] = self.allow_untrusted_certificate - if hasattr(self, 'maximum_hops') and self.maximum_hops is not None: - _dict['maximum_hops'] = self.maximum_hops - if hasattr(self, - 'request_timeout') and self.request_timeout is not None: - _dict['request_timeout'] = self.request_timeout - if hasattr( - self, - 'override_robots_txt') and self.override_robots_txt is not None: - _dict['override_robots_txt'] = self.override_robots_txt - return _dict - - def __str__(self): - """Return a `str` version of this SourceOptionsWebCrawl object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - 
"""Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SourceSchedule(object): - """ - Object containing the schedule information for the source. - - :attr bool enabled: (optional) When `true`, the source is re-crawled based on the - **frequency** field in this object. When `false` the source is not re-crawled; When - `false` and connecting to Salesforce the source is crawled annually. - :attr str time_zone: (optional) The time zone to base source crawl times on. Possible - values correspond to the IANA (Internet Assigned Numbers Authority) time zones list. - :attr str frequency: (optional) The crawl schedule in the specified **time_zone**. - - `daily`: Runs every day between 00:00 and 06:00. - - `weekly`: Runs every week on Sunday between 00:00 and 06:00. - - `monthly`: Runs the on the first Sunday of every month between 00:00 and 06:00. - """ - - def __init__(self, enabled=None, time_zone=None, frequency=None): - """ - Initialize a SourceSchedule object. - - :param bool enabled: (optional) When `true`, the source is re-crawled based on the - **frequency** field in this object. When `false` the source is not re-crawled; - When `false` and connecting to Salesforce the source is crawled annually. - :param str time_zone: (optional) The time zone to base source crawl times on. - Possible values correspond to the IANA (Internet Assigned Numbers Authority) time - zones list. - :param str frequency: (optional) The crawl schedule in the specified - **time_zone**. - - `daily`: Runs every day between 00:00 and 06:00. - - `weekly`: Runs every week on Sunday between 00:00 and 06:00. - - `monthly`: Runs the on the first Sunday of every month between 00:00 and 06:00. - """ - self.enabled = enabled - self.time_zone = time_zone - self.frequency = frequency - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SourceSchedule object from a json dictionary.""" - args = {} - if 'enabled' in _dict: - args['enabled'] = _dict.get('enabled') - if 'time_zone' in _dict: - args['time_zone'] = _dict.get('time_zone') - if 'frequency' in _dict: - args['frequency'] = _dict.get('frequency') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'enabled') and self.enabled is not None: - _dict['enabled'] = self.enabled - if hasattr(self, 'time_zone') and self.time_zone is not None: - _dict['time_zone'] = self.time_zone - if hasattr(self, 'frequency') and self.frequency is not None: - _dict['frequency'] = self.frequency - return _dict - - def __str__(self): - """Return a `str` version of this SourceSchedule object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SourceStatus(object): - """ - Object containing source crawl status information. - - :attr str status: (optional) The current status of the source crawl for this - collection. 
This field returns `not_configured` if the default configuration for this - source does not have a **source** object defined. - - `running` indicates that a crawl to fetch more documents is in progress. - - `complete` indicates that the crawl has completed with no errors. - - `queued` indicates that the crawl has been paused by the system and will - automatically restart when possible. - :attr datetime last_updated: (optional) Date in UTC format indicating when the last - crawl was attempted. If `null`, no crawl was completed. - """ - - def __init__(self, status=None, last_updated=None): - """ - Initialize a SourceStatus object. - - :param str status: (optional) The current status of the source crawl for this - collection. This field returns `not_configured` if the default configuration for - this source does not have a **source** object defined. - - `running` indicates that a crawl to fetch more documents is in progress. - - `complete` indicates that the crawl has completed with no errors. - - `queued` indicates that the crawl has been paused by the system and will - automatically restart when possible. - :param datetime last_updated: (optional) Date in UTC format indicating when the - last crawl was attempted. If `null`, no crawl was completed. - """ - self.status = status - self.last_updated = last_updated - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SourceStatus object from a json dictionary.""" - args = {} - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'last_updated' in _dict: - args['last_updated'] = string_to_datetime(_dict.get('last_updated')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'last_updated') and self.last_updated is not None: - _dict['last_updated'] = datetime_to_string(self.last_updated) - return _dict - - def __str__(self): - """Return a `str` version of this SourceStatus object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Term(object): - """ - Term. - - :attr str field: (optional) The field where the aggregation is located in the - document. - :attr int count: (optional) - """ - - def __init__(self, - type=None, - results=None, - matching_results=None, - aggregations=None, - field=None, - count=None): - """ - Initialize a Term object. - - :param str type: (optional) The type of aggregation command used. For example: - term, filter, max, min, etc. - :param list[AggregationResult] results: (optional) Array of aggregation results. - :param int matching_results: (optional) Number of matching results. - :param list[QueryAggregation] aggregations: (optional) Aggregations returned by - the Discovery service. - :param str field: (optional) The field where the aggregation is located in the - document. 
- :param int count: (optional) - """ - self.field = field - self.count = count - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Term object from a json dictionary.""" - args = {} - if 'field' in _dict: - args['field'] = _dict.get('field') - if 'count' in _dict: - args['count'] = _dict.get('count') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'field') and self.field is not None: - _dict['field'] = self.field - if hasattr(self, 'count') and self.count is not None: - _dict['count'] = self.count - return _dict - - def __str__(self): - """Return a `str` version of this Term object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TestDocument(object): - """ - TestDocument. - - :attr str configuration_id: (optional) The unique identifier for the configuration. - :attr str status: (optional) Status of the preview operation. - :attr int enriched_field_units: (optional) The number of 10-kB chunks of field data - that were enriched. This can be used to estimate the cost of running a real ingestion. - :attr str original_media_type: (optional) Format of the test document. - :attr list[DocumentSnapshot] snapshots: (optional) An array of objects that describe - each step in the preview process. - :attr list[Notice] notices: (optional) An array of notice messages about the preview - operation. - """ - - def __init__(self, - configuration_id=None, - status=None, - enriched_field_units=None, - original_media_type=None, - snapshots=None, - notices=None): - """ - Initialize a TestDocument object. - - :param str configuration_id: (optional) The unique identifier for the - configuration. - :param str status: (optional) Status of the preview operation. - :param int enriched_field_units: (optional) The number of 10-kB chunks of field - data that were enriched. This can be used to estimate the cost of running a real - ingestion. - :param str original_media_type: (optional) Format of the test document. - :param list[DocumentSnapshot] snapshots: (optional) An array of objects that - describe each step in the preview process. - :param list[Notice] notices: (optional) An array of notice messages about the - preview operation. 
- """ - self.configuration_id = configuration_id - self.status = status - self.enriched_field_units = enriched_field_units - self.original_media_type = original_media_type - self.snapshots = snapshots - self.notices = notices - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TestDocument object from a json dictionary.""" - args = {} - if 'configuration_id' in _dict: - args['configuration_id'] = _dict.get('configuration_id') - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'enriched_field_units' in _dict: - args['enriched_field_units'] = _dict.get('enriched_field_units') - if 'original_media_type' in _dict: - args['original_media_type'] = _dict.get('original_media_type') - if 'snapshots' in _dict: - args['snapshots'] = [ - DocumentSnapshot._from_dict(x) for x in (_dict.get('snapshots')) - ] - if 'notices' in _dict: - args['notices'] = [ - Notice._from_dict(x) for x in (_dict.get('notices')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'configuration_id') and self.configuration_id is not None: - _dict['configuration_id'] = self.configuration_id - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'enriched_field_units' - ) and self.enriched_field_units is not None: - _dict['enriched_field_units'] = self.enriched_field_units - if hasattr( - self, - 'original_media_type') and self.original_media_type is not None: - _dict['original_media_type'] = self.original_media_type - if hasattr(self, 'snapshots') and self.snapshots is not None: - _dict['snapshots'] = [x._to_dict() for x in self.snapshots] - if hasattr(self, 'notices') and self.notices is not None: - _dict['notices'] = [x._to_dict() for x in self.notices] - return _dict - - def __str__(self): - """Return a `str` version of this TestDocument object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Timeslice(object): - """ - Timeslice. - - :attr str field: (optional) The field where the aggregation is located in the - document. - :attr str interval: (optional) Interval of the aggregation. Valid date interval values - are second/seconds minute/minutes, hour/hours, day/days, week/weeks, month/months, and - year/years. - :attr bool anomaly: (optional) Used to indicate that anomaly detection should be - performed. Anomaly detection is used to locate unusual datapoints within a time - series. - """ - - def __init__(self, - type=None, - results=None, - matching_results=None, - aggregations=None, - field=None, - interval=None, - anomaly=None): - """ - Initialize a Timeslice object. - - :param str type: (optional) The type of aggregation command used. For example: - term, filter, max, min, etc. - :param list[AggregationResult] results: (optional) Array of aggregation results. - :param int matching_results: (optional) Number of matching results. - :param list[QueryAggregation] aggregations: (optional) Aggregations returned by - the Discovery service. - :param str field: (optional) The field where the aggregation is located in the - document. - :param str interval: (optional) Interval of the aggregation. 
Valid date interval - values are second/seconds minute/minutes, hour/hours, day/days, week/weeks, - month/months, and year/years. - :param bool anomaly: (optional) Used to indicate that anomaly detection should be - performed. Anomaly detection is used to locate unusual datapoints within a time - series. - """ - self.field = field - self.interval = interval - self.anomaly = anomaly - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Timeslice object from a json dictionary.""" - args = {} - if 'field' in _dict: - args['field'] = _dict.get('field') - if 'interval' in _dict: - args['interval'] = _dict.get('interval') - if 'anomaly' in _dict: - args['anomaly'] = _dict.get('anomaly') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'field') and self.field is not None: - _dict['field'] = self.field - if hasattr(self, 'interval') and self.interval is not None: - _dict['interval'] = self.interval - if hasattr(self, 'anomaly') and self.anomaly is not None: - _dict['anomaly'] = self.anomaly - return _dict - - def __str__(self): - """Return a `str` version of this Timeslice object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TokenDictRule(object): - """ - An object defining a single tokenization rule. - - :attr str text: The string to tokenize. - :attr list[str] tokens: Array of tokens that the `text` field is split into when - found. - :attr list[str] readings: (optional) Array of tokens that represent the content of the - `text` field in an alternate character set. - :attr str part_of_speech: The part of speech that the `text` string belongs to. For - example `noun`. Custom parts of speech can be specified. - """ - - def __init__(self, text, tokens, part_of_speech, readings=None): - """ - Initialize a TokenDictRule object. - - :param str text: The string to tokenize. - :param list[str] tokens: Array of tokens that the `text` field is split into when - found. - :param str part_of_speech: The part of speech that the `text` string belongs to. - For example `noun`. Custom parts of speech can be specified. - :param list[str] readings: (optional) Array of tokens that represent the content - of the `text` field in an alternate character set. 
- """ - self.text = text - self.tokens = tokens - self.readings = readings - self.part_of_speech = part_of_speech - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TokenDictRule object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - else: - raise ValueError( - 'Required property \'text\' not present in TokenDictRule JSON') - if 'tokens' in _dict: - args['tokens'] = _dict.get('tokens') - else: - raise ValueError( - 'Required property \'tokens\' not present in TokenDictRule JSON' - ) - if 'readings' in _dict: - args['readings'] = _dict.get('readings') - if 'part_of_speech' in _dict: - args['part_of_speech'] = _dict.get('part_of_speech') - else: - raise ValueError( - 'Required property \'part_of_speech\' not present in TokenDictRule JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'tokens') and self.tokens is not None: - _dict['tokens'] = self.tokens - if hasattr(self, 'readings') and self.readings is not None: - _dict['readings'] = self.readings - if hasattr(self, 'part_of_speech') and self.part_of_speech is not None: - _dict['part_of_speech'] = self.part_of_speech - return _dict - - def __str__(self): - """Return a `str` version of this TokenDictRule object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TokenDictStatusResponse(object): - """ - Object describing the current status of the wordlist. - - :attr str status: (optional) Current wordlist status for the specified collection. - :attr str type: (optional) The type for this wordlist. Can be - `tokenization_dictionary` or `stopwords`. - """ - - def __init__(self, status=None, type=None): - """ - Initialize a TokenDictStatusResponse object. - - :param str status: (optional) Current wordlist status for the specified - collection. - :param str type: (optional) The type for this wordlist. Can be - `tokenization_dictionary` or `stopwords`. - """ - self.status = status - self.type = type - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TokenDictStatusResponse object from a json dictionary.""" - args = {} - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'type' in _dict: - args['type'] = _dict.get('type') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'type') and self.type is not None: - _dict['type'] = self.type - return _dict - - def __str__(self): - """Return a `str` version of this TokenDictStatusResponse object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TopHits(object): - """ - TopHits. 
- - :attr int size: (optional) Number of top hits returned by the aggregation. - :attr TopHitsResults hits: (optional) - """ - - def __init__(self, - type=None, - results=None, - matching_results=None, - aggregations=None, - size=None, - hits=None): - """ - Initialize a TopHits object. - - :param str type: (optional) The type of aggregation command used. For example: - term, filter, max, min, etc. - :param list[AggregationResult] results: (optional) Array of aggregation results. - :param int matching_results: (optional) Number of matching results. - :param list[QueryAggregation] aggregations: (optional) Aggregations returned by - the Discovery service. - :param int size: (optional) Number of top hits returned by the aggregation. - :param TopHitsResults hits: (optional) - """ - self.size = size - self.hits = hits - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TopHits object from a json dictionary.""" - args = {} - if 'size' in _dict: - args['size'] = _dict.get('size') - if 'hits' in _dict: - args['hits'] = TopHitsResults._from_dict(_dict.get('hits')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'size') and self.size is not None: - _dict['size'] = self.size - if hasattr(self, 'hits') and self.hits is not None: - _dict['hits'] = self.hits._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this TopHits object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TopHitsResults(object): - """ - TopHitsResults. - - :attr int matching_results: (optional) Number of matching results. - :attr list[QueryResult] hits: (optional) Top results returned by the aggregation. - """ - - def __init__(self, matching_results=None, hits=None): - """ - Initialize a TopHitsResults object. - - :param int matching_results: (optional) Number of matching results. - :param list[QueryResult] hits: (optional) Top results returned by the aggregation. 
- """ - self.matching_results = matching_results - self.hits = hits - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TopHitsResults object from a json dictionary.""" - args = {} - if 'matching_results' in _dict: - args['matching_results'] = _dict.get('matching_results') - if 'hits' in _dict: - args['hits'] = [ - QueryResult._from_dict(x) for x in (_dict.get('hits')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'matching_results') and self.matching_results is not None: - _dict['matching_results'] = self.matching_results - if hasattr(self, 'hits') and self.hits is not None: - _dict['hits'] = [x._to_dict() for x in self.hits] - return _dict - - def __str__(self): - """Return a `str` version of this TopHitsResults object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TrainingDataSet(object): - """ - TrainingDataSet. - - :attr str environment_id: (optional) The environment id associated with this training - data set. - :attr str collection_id: (optional) The collection id associated with this training - data set. - :attr list[TrainingQuery] queries: (optional) Array of training queries. - """ - - def __init__(self, environment_id=None, collection_id=None, queries=None): - """ - Initialize a TrainingDataSet object. - - :param str environment_id: (optional) The environment id associated with this - training data set. - :param str collection_id: (optional) The collection id associated with this - training data set. - :param list[TrainingQuery] queries: (optional) Array of training queries. - """ - self.environment_id = environment_id - self.collection_id = collection_id - self.queries = queries - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TrainingDataSet object from a json dictionary.""" - args = {} - if 'environment_id' in _dict: - args['environment_id'] = _dict.get('environment_id') - if 'collection_id' in _dict: - args['collection_id'] = _dict.get('collection_id') - if 'queries' in _dict: - args['queries'] = [ - TrainingQuery._from_dict(x) for x in (_dict.get('queries')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'environment_id') and self.environment_id is not None: - _dict['environment_id'] = self.environment_id - if hasattr(self, 'collection_id') and self.collection_id is not None: - _dict['collection_id'] = self.collection_id - if hasattr(self, 'queries') and self.queries is not None: - _dict['queries'] = [x._to_dict() for x in self.queries] - return _dict - - def __str__(self): - """Return a `str` version of this TrainingDataSet object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TrainingExample(object): - """ - TrainingExample. 
- - :attr str document_id: (optional) The document ID associated with this training - example. - :attr str cross_reference: (optional) The cross reference associated with this - training example. - :attr int relevance: (optional) The relevance of the training example. - """ - - def __init__(self, document_id=None, cross_reference=None, relevance=None): - """ - Initialize a TrainingExample object. - - :param str document_id: (optional) The document ID associated with this training - example. - :param str cross_reference: (optional) The cross reference associated with this - training example. - :param int relevance: (optional) The relevance of the training example. - """ - self.document_id = document_id - self.cross_reference = cross_reference - self.relevance = relevance - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TrainingExample object from a json dictionary.""" - args = {} - if 'document_id' in _dict: - args['document_id'] = _dict.get('document_id') - if 'cross_reference' in _dict: - args['cross_reference'] = _dict.get('cross_reference') - if 'relevance' in _dict: - args['relevance'] = _dict.get('relevance') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document_id') and self.document_id is not None: - _dict['document_id'] = self.document_id - if hasattr(self, - 'cross_reference') and self.cross_reference is not None: - _dict['cross_reference'] = self.cross_reference - if hasattr(self, 'relevance') and self.relevance is not None: - _dict['relevance'] = self.relevance - return _dict - - def __str__(self): - """Return a `str` version of this TrainingExample object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TrainingExampleList(object): - """ - TrainingExampleList. - - :attr list[TrainingExample] examples: (optional) Array of training examples. - """ - - def __init__(self, examples=None): - """ - Initialize a TrainingExampleList object. - - :param list[TrainingExample] examples: (optional) Array of training examples. - """ - self.examples = examples - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TrainingExampleList object from a json dictionary.""" - args = {} - if 'examples' in _dict: - args['examples'] = [ - TrainingExample._from_dict(x) for x in (_dict.get('examples')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'examples') and self.examples is not None: - _dict['examples'] = [x._to_dict() for x in self.examples] - return _dict - - def __str__(self): - """Return a `str` version of this TrainingExampleList object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TrainingQuery(object): - """ - TrainingQuery. - - :attr str query_id: (optional) The query ID associated with the training query. 
- :attr str natural_language_query: (optional) The natural text query for the training - query. - :attr str filter: (optional) The filter used on the collection before the - **natural_language_query** is applied. - :attr list[TrainingExample] examples: (optional) Array of training examples. - """ - - def __init__(self, - query_id=None, - natural_language_query=None, - filter=None, - examples=None): - """ - Initialize a TrainingQuery object. - - :param str query_id: (optional) The query ID associated with the training query. - :param str natural_language_query: (optional) The natural text query for the - training query. - :param str filter: (optional) The filter used on the collection before the - **natural_language_query** is applied. - :param list[TrainingExample] examples: (optional) Array of training examples. - """ - self.query_id = query_id - self.natural_language_query = natural_language_query - self.filter = filter - self.examples = examples - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TrainingQuery object from a json dictionary.""" - args = {} - if 'query_id' in _dict: - args['query_id'] = _dict.get('query_id') - if 'natural_language_query' in _dict: - args['natural_language_query'] = _dict.get('natural_language_query') - if 'filter' in _dict: - args['filter'] = _dict.get('filter') - if 'examples' in _dict: - args['examples'] = [ - TrainingExample._from_dict(x) for x in (_dict.get('examples')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'query_id') and self.query_id is not None: - _dict['query_id'] = self.query_id - if hasattr(self, 'natural_language_query' - ) and self.natural_language_query is not None: - _dict['natural_language_query'] = self.natural_language_query - if hasattr(self, 'filter') and self.filter is not None: - _dict['filter'] = self.filter - if hasattr(self, 'examples') and self.examples is not None: - _dict['examples'] = [x._to_dict() for x in self.examples] - return _dict - - def __str__(self): - """Return a `str` version of this TrainingQuery object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TrainingStatus(object): - """ - TrainingStatus. - - :attr int total_examples: (optional) The total number of training examples uploaded to - this collection. - :attr bool available: (optional) When `true`, the collection has been successfully - trained. - :attr bool processing: (optional) When `true`, the collection is currently processing - training. - :attr bool minimum_queries_added: (optional) When `true`, the collection has a - sufficent amount of queries added for training to occur. - :attr bool minimum_examples_added: (optional) When `true`, the collection has a - sufficent amount of examples added for training to occur. - :attr bool sufficient_label_diversity: (optional) When `true`, the collection has a - sufficent amount of diversity in labeled results for training to occur. - :attr int notices: (optional) The number of notices associated with this data set. - :attr datetime successfully_trained: (optional) The timestamp of when the collection - was successfully trained. 
- :attr datetime data_updated: (optional) The timestamp of when the data was uploaded. - """ - - def __init__(self, - total_examples=None, - available=None, - processing=None, - minimum_queries_added=None, - minimum_examples_added=None, - sufficient_label_diversity=None, - notices=None, - successfully_trained=None, - data_updated=None): - """ - Initialize a TrainingStatus object. - - :param int total_examples: (optional) The total number of training examples - uploaded to this collection. - :param bool available: (optional) When `true`, the collection has been - successfully trained. - :param bool processing: (optional) When `true`, the collection is currently - processing training. - :param bool minimum_queries_added: (optional) When `true`, the collection has a - sufficent amount of queries added for training to occur. - :param bool minimum_examples_added: (optional) When `true`, the collection has a - sufficent amount of examples added for training to occur. - :param bool sufficient_label_diversity: (optional) When `true`, the collection has - a sufficent amount of diversity in labeled results for training to occur. - :param int notices: (optional) The number of notices associated with this data - set. - :param datetime successfully_trained: (optional) The timestamp of when the - collection was successfully trained. - :param datetime data_updated: (optional) The timestamp of when the data was - uploaded. - """ - self.total_examples = total_examples - self.available = available - self.processing = processing - self.minimum_queries_added = minimum_queries_added - self.minimum_examples_added = minimum_examples_added - self.sufficient_label_diversity = sufficient_label_diversity - self.notices = notices - self.successfully_trained = successfully_trained - self.data_updated = data_updated - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TrainingStatus object from a json dictionary.""" - args = {} - if 'total_examples' in _dict: - args['total_examples'] = _dict.get('total_examples') - if 'available' in _dict: - args['available'] = _dict.get('available') - if 'processing' in _dict: - args['processing'] = _dict.get('processing') - if 'minimum_queries_added' in _dict: - args['minimum_queries_added'] = _dict.get('minimum_queries_added') - if 'minimum_examples_added' in _dict: - args['minimum_examples_added'] = _dict.get('minimum_examples_added') - if 'sufficient_label_diversity' in _dict: - args['sufficient_label_diversity'] = _dict.get( - 'sufficient_label_diversity') - if 'notices' in _dict: - args['notices'] = _dict.get('notices') - if 'successfully_trained' in _dict: - args['successfully_trained'] = string_to_datetime( - _dict.get('successfully_trained')) - if 'data_updated' in _dict: - args['data_updated'] = string_to_datetime(_dict.get('data_updated')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'total_examples') and self.total_examples is not None: - _dict['total_examples'] = self.total_examples - if hasattr(self, 'available') and self.available is not None: - _dict['available'] = self.available - if hasattr(self, 'processing') and self.processing is not None: - _dict['processing'] = self.processing - if hasattr(self, 'minimum_queries_added' - ) and self.minimum_queries_added is not None: - _dict['minimum_queries_added'] = self.minimum_queries_added - if hasattr(self, 'minimum_examples_added' - ) and self.minimum_examples_added is not None: - _dict['minimum_examples_added'] = 
self.minimum_examples_added - if hasattr(self, 'sufficient_label_diversity' - ) and self.sufficient_label_diversity is not None: - _dict[ - 'sufficient_label_diversity'] = self.sufficient_label_diversity - if hasattr(self, 'notices') and self.notices is not None: - _dict['notices'] = self.notices - if hasattr(self, 'successfully_trained' - ) and self.successfully_trained is not None: - _dict['successfully_trained'] = datetime_to_string( - self.successfully_trained) - if hasattr(self, 'data_updated') and self.data_updated is not None: - _dict['data_updated'] = datetime_to_string(self.data_updated) - return _dict - - def __str__(self): - """Return a `str` version of this TrainingStatus object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class WordHeadingDetection(object): - """ - WordHeadingDetection. - - :attr list[FontSetting] fonts: (optional) - :attr list[WordStyle] styles: (optional) - """ - - def __init__(self, fonts=None, styles=None): - """ - Initialize a WordHeadingDetection object. - - :param list[FontSetting] fonts: (optional) - :param list[WordStyle] styles: (optional) - """ - self.fonts = fonts - self.styles = styles - - @classmethod - def _from_dict(cls, _dict): - """Initialize a WordHeadingDetection object from a json dictionary.""" - args = {} - if 'fonts' in _dict: - args['fonts'] = [ - FontSetting._from_dict(x) for x in (_dict.get('fonts')) - ] - if 'styles' in _dict: - args['styles'] = [ - WordStyle._from_dict(x) for x in (_dict.get('styles')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'fonts') and self.fonts is not None: - _dict['fonts'] = [x._to_dict() for x in self.fonts] - if hasattr(self, 'styles') and self.styles is not None: - _dict['styles'] = [x._to_dict() for x in self.styles] - return _dict - - def __str__(self): - """Return a `str` version of this WordHeadingDetection object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class WordSettings(object): - """ - A list of Word conversion settings. - - :attr WordHeadingDetection heading: (optional) - """ - - def __init__(self, heading=None): - """ - Initialize a WordSettings object. 
- - :param WordHeadingDetection heading: (optional) - """ - self.heading = heading - - @classmethod - def _from_dict(cls, _dict): - """Initialize a WordSettings object from a json dictionary.""" - args = {} - if 'heading' in _dict: - args['heading'] = WordHeadingDetection._from_dict( - _dict.get('heading')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'heading') and self.heading is not None: - _dict['heading'] = self.heading._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this WordSettings object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class WordStyle(object): - """ - WordStyle. - - :attr int level: (optional) HTML head level that content matching this style is tagged - with. - :attr list[str] names: (optional) Array of word style names to convert. - """ - - def __init__(self, level=None, names=None): - """ - Initialize a WordStyle object. - - :param int level: (optional) HTML head level that content matching this style is - tagged with. - :param list[str] names: (optional) Array of word style names to convert. - """ - self.level = level - self.names = names - - @classmethod - def _from_dict(cls, _dict): - """Initialize a WordStyle object from a json dictionary.""" - args = {} - if 'level' in _dict: - args['level'] = _dict.get('level') - if 'names' in _dict: - args['names'] = _dict.get('names') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'level') and self.level is not None: - _dict['level'] = self.level - if hasattr(self, 'names') and self.names is not None: - _dict['names'] = self.names - return _dict - - def __str__(self): - """Return a `str` version of this WordStyle object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class XPathPatterns(object): - """ - XPathPatterns. - - :attr list[str] xpaths: (optional) An array to XPaths. - """ - - def __init__(self, xpaths=None): - """ - Initialize a XPathPatterns object. - - :param list[str] xpaths: (optional) An array to XPaths. 
- """ - self.xpaths = xpaths - - @classmethod - def _from_dict(cls, _dict): - """Initialize a XPathPatterns object from a json dictionary.""" - args = {} - if 'xpaths' in _dict: - args['xpaths'] = _dict.get('xpaths') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'xpaths') and self.xpaths is not None: - _dict['xpaths'] = self.xpaths - return _dict - - def __str__(self): - """Return a `str` version of this XPathPatterns object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other diff --git a/ibm_watson/discovery_v2.py b/ibm_watson/discovery_v2.py new file mode 100644 index 000000000..642007426 --- /dev/null +++ b/ibm_watson/discovery_v2.py @@ -0,0 +1,14422 @@ +# coding: utf-8 + +# (C) Copyright IBM Corp. 2019, 2025. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# IBM OpenAPI SDK Code Generator Version: 3.105.0-3c13b041-20250605-193116 +""" +IBM Watson® Discovery is a cognitive search and content analytics engine that you can +add to applications to identify patterns, trends and actionable insights to drive better +decision-making. Securely unify structured and unstructured data with pre-enriched +content, and use a simplified query language to eliminate the need for manual filtering of +results. + +API Version: 2.0 +See: https://cloud.ibm.com/docs/discovery-data +""" + +from datetime import datetime +from enum import Enum +from os.path import basename +from typing import BinaryIO, Dict, List, Optional +import json +import sys + +from ibm_cloud_sdk_core import BaseService, DetailedResponse +from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator +from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment +from ibm_cloud_sdk_core.utils import convert_list, convert_model, datetime_to_string, string_to_datetime + +from .common import get_sdk_headers + +############################################################################## +# Service +############################################################################## + + +class DiscoveryV2(BaseService): + """The Discovery V2 service.""" + + DEFAULT_SERVICE_URL = 'https://api.us-south.discovery.watson.cloud.ibm.com' + DEFAULT_SERVICE_NAME = 'discovery' + + def __init__( + self, + version: str, + authenticator: Authenticator = None, + service_name: str = DEFAULT_SERVICE_NAME, + ) -> None: + """ + Construct a new client for the Discovery service. + + :param str version: Release date of the version of the API you want to use. + Specify dates in YYYY-MM-DD format. The current version is `2023-03-31`. + + :param Authenticator authenticator: The authenticator specifies the authentication mechanism. 
+ Get up to date information from https://github.com/IBM/python-sdk-core/blob/main/README.md + about initializing the authenticator of your choice. + """ + if version is None: + raise ValueError('version must be provided') + + if not authenticator: + authenticator = get_authenticator_from_environment(service_name) + BaseService.__init__(self, + service_url=self.DEFAULT_SERVICE_URL, + authenticator=authenticator) + self.version = version + self.configure_service(service_name) + + ######################### + # Projects + ######################### + + def list_projects( + self, + **kwargs, + ) -> DetailedResponse: + """ + List projects. + + Lists existing projects for this instance. + + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `ListProjectsResponse` object + """ + + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_projects', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v2/projects' + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def create_project( + self, + name: str, + type: str, + *, + default_query_parameters: Optional['DefaultQueryParams'] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create a project. + + Create a new project for this instance. + + :param str name: The human readable name of this project. + :param str type: The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* + project and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with + Premium plan managed deployments and installed deployments only. + The Intelligent Document Processing (IDP) project type is available from + IBM Cloud-managed instances only. + :param DefaultQueryParams default_query_parameters: (optional) Default + query parameters for this project. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
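To illustrate the client constructor and the `list_projects` operation added above, the following sketch shows how an application might instantiate `DiscoveryV2` and list its projects. It is illustrative only; the API key, service URL, and version date are placeholders rather than values taken from this change.

```
import json

from ibm_watson import DiscoveryV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Placeholder credentials and endpoint; substitute real values.
authenticator = IAMAuthenticator('{apikey}')
discovery = DiscoveryV2(version='2023-03-31', authenticator=authenticator)
discovery.set_service_url('{service_url}')

# list_projects() returns a DetailedResponse; get_result() yields the
# ListProjectsResponse dictionary described in the docstring.
projects = discovery.list_projects().get_result()
print(json.dumps(projects, indent=2))
```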
+ :rtype: DetailedResponse with `dict` result representing a `ProjectDetails` object + """ + + if name is None: + raise ValueError('name must be provided') + if type is None: + raise ValueError('type must be provided') + if default_query_parameters is not None: + default_query_parameters = convert_model(default_query_parameters) + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_project', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'name': name, + 'type': type, + 'default_query_parameters': default_query_parameters, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v2/projects' + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def get_project( + self, + project_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get project. + + Get details on the specified project. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `ProjectDetails` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_project', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def update_project( + self, + project_id: str, + *, + name: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update a project. + + Update the specified project's name. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str name: (optional) The new name to give this project. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
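Continuing that sketch, `create_project` and `get_project` can be exercised as shown below. The project name is a placeholder, `other` is the *Custom* project type named in the docstring, and the `project_id` key is assumed to be part of the returned `ProjectDetails` payload.

```
# Create a Custom project ('other' is one of the documented project types).
project = discovery.create_project(
    name='Sample project',
    type='other',
).get_result()

# 'project_id' is assumed to be a field of the ProjectDetails response.
project_id = project['project_id']
details = discovery.get_project(project_id=project_id).get_result()
```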
+ :rtype: DetailedResponse with `dict` result representing a `ProjectDetails` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='update_project', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'name': name, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}'.format(**path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def delete_project( + self, + project_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete a project. + + Deletes the specified project. + **Important:** Deleting a project deletes everything that is part of the specified + project, including all collections. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if not project_id: + raise ValueError('project_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_project', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}'.format(**path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def list_fields( + self, + project_id: str, + *, + collection_ids: Optional[List[str]] = None, + **kwargs, + ) -> DetailedResponse: + """ + List fields. + + Gets a list of the unique fields (and their types) stored in the specified + collections. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param List[str] collection_ids: (optional) Comma separated list of the + collection IDs. If this parameter is not specified, all collections in the + project are used. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
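The rename and delete operations follow the same pattern; a minimal sketch, reusing the `project_id` from the previous example:

```
# Rename the project, then delete it. Per the docstring, deleting a project
# also deletes everything in it, including all collections.
discovery.update_project(project_id=project_id, name='Renamed project')
discovery.delete_project(project_id=project_id)
```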
+ :rtype: DetailedResponse with `dict` result representing a `ListFieldsResponse` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_fields', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'collection_ids': convert_list(collection_ids), + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/fields'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Collections + ######################### + + def list_collections( + self, + project_id: str, + **kwargs, + ) -> DetailedResponse: + """ + List collections. + + Lists existing collections for the specified project. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `ListCollectionsResponse` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_collections', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def create_collection( + self, + project_id: str, + name: str, + *, + description: Optional[str] = None, + language: Optional[str] = None, + ocr_enabled: Optional[bool] = None, + enrichments: Optional[List['CollectionEnrichment']] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create a collection. + + Create a new collection in the specified project. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str name: The name of the collection. + :param str description: (optional) A description of the collection. + :param str language: (optional) The language of the collection. For a list + of supported languages, see the [product + documentation](/docs/discovery-data?topic=discovery-data-language-support). + :param bool ocr_enabled: (optional) If set to `true`, optical character + recognition (OCR) is enabled. For more information, see [Optical character + recognition](/docs/discovery-data?topic=discovery-data-collections#ocr). 
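`list_collections` and `list_fields` both take the project ID; `list_fields` optionally narrows the result to specific collections. A sketch with placeholder collection IDs:

```
collections = discovery.list_collections(project_id=project_id).get_result()

# Restrict the field listing to particular collections; the IDs below are
# placeholders for values returned by list_collections.
fields = discovery.list_fields(
    project_id=project_id,
    collection_ids=['{collection_id_1}', '{collection_id_2}'],
).get_result()
```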
+ :param List[CollectionEnrichment] enrichments: (optional) An array of + enrichments that are applied to this collection. To get a list of + enrichments that are available for a project, use the [List + enrichments](#listenrichments) method. + If no enrichments are specified when the collection is created, the default + enrichments for the project type are applied. For more information about + project default settings, see the [product + documentation](/docs/discovery-data?topic=discovery-data-project-defaults). + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `CollectionDetails` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if name is None: + raise ValueError('name must be provided') + if enrichments is not None: + enrichments = [convert_model(x) for x in enrichments] + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_collection', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'name': name, + 'description': description, + 'language': language, + 'ocr_enabled': ocr_enabled, + 'enrichments': enrichments, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections'.format(**path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def get_collection( + self, + project_id: str, + collection_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get collection details. + + Get details about the specified collection. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
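A sketch of `create_collection`; the name, description, and language are example values, and omitting `enrichments` applies the project-type defaults described in the docstring. The `collection_id` key is assumed to be part of the `CollectionDetails` response.

```
collection = discovery.create_collection(
    project_id=project_id,
    name='sample-collection',
    description='A collection created from the SDK',
    language='en',
).get_result()
collection_id = collection['collection_id']  # assumed CollectionDetails field
```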
+ :rtype: DetailedResponse with `dict` result representing a `CollectionDetails` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_collection', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def update_collection( + self, + project_id: str, + collection_id: str, + *, + name: Optional[str] = None, + description: Optional[str] = None, + ocr_enabled: Optional[bool] = None, + enrichments: Optional[List['CollectionEnrichment']] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update a collection. + + Updates the specified collection's name, description, enrichments, and + configuration. + If you apply normalization rules to data in an existing collection, you must + initiate reprocessing of the collection. To do so, from the *Manage fields* page + in the product user interface, temporarily change the data type of a field to + enable the reprocess button. Change the data type of the field back to its + original value, and then click **Apply changes and reprocess**. + To remove a configuration that applies JSON normalization operations as part of + the conversion phase of ingestion, specify an empty `json_normalizations` object + (`[]`) in the request. + To remove a configuration that applies JSON normalization operations after + enrichments are applied, specify an empty `normalizations` object (`[]`) in the + request. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param str name: (optional) The new name of the collection. + :param str description: (optional) The new description of the collection. + :param bool ocr_enabled: (optional) If set to `true`, optical character + recognition (OCR) is enabled. For more information, see [Optical character + recognition](/docs/discovery-data?topic=discovery-data-collections#ocr). + :param List[CollectionEnrichment] enrichments: (optional) An array of + enrichments that are applied to this collection. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `CollectionDetails` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + if enrichments is not None: + enrichments = [convert_model(x) for x in enrichments] + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='update_collection', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'name': name, + 'description': description, + 'ocr_enabled': ocr_enabled, + 'enrichments': enrichments, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def delete_collection( + self, + project_id: str, + collection_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete a collection. + + Deletes the specified collection from the project. All documents stored in the + specified collection and not shared is also deleted. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_collection', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Documents + ######################### + + def list_documents( + self, + project_id: str, + collection_id: str, + *, + count: Optional[int] = None, + status: Optional[str] = None, + has_notices: Optional[bool] = None, + is_parent: Optional[bool] = None, + parent_document_id: Optional[str] = None, + sha256: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + List documents. + + Lists the documents in the specified collection. 
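The remaining collection operations can be sketched the same way, with placeholder values throughout; note that deleting a collection also deletes its unshared documents.

```
discovery.get_collection(project_id=project_id, collection_id=collection_id)

discovery.update_collection(
    project_id=project_id,
    collection_id=collection_id,
    description='Updated description',
)

discovery.delete_collection(project_id=project_id, collection_id=collection_id)
```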
The list includes only the + document ID of each document and returns information for up to 10,000 documents. + **Note**: This method is available only from Cloud Pak for Data version 4.0.9 and + later installed instances, and from IBM Cloud-managed instances. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param int count: (optional) The maximum number of documents to return. Up + to 1,000 documents are returned by default. The maximum number allowed is + 10,000. + :param str status: (optional) Filters the documents to include only + documents with the specified ingestion status. The options include: + * `available`: Ingestion is finished and the document is indexed. + * `failed`: Ingestion is finished, but the document is not indexed because + of an error. + * `pending`: The document is uploaded, but the ingestion process is not + started. + * `processing`: Ingestion is in progress. + You can specify one status value or add a comma-separated list of more than + one status value. For example, `available,failed`. + :param bool has_notices: (optional) If set to `true`, only documents that + have notices, meaning documents for which warnings or errors were generated + during the ingestion, are returned. If set to `false`, only documents that + don't have notices are returned. If unspecified, no filter based on notices + is applied. + Notice details are not available in the result, but you can use the [Query + collection notices](#querycollectionnotices) method to find details by + adding the parameter `query=notices.document_id:{document-id}`. + :param bool is_parent: (optional) If set to `true`, only parent documents, + meaning documents that were split during the ingestion process and resulted + in two or more child documents, are returned. If set to `false`, only child + documents are returned. If unspecified, no filter based on the parent or + child relationship is applied. + CSV files, for example, are split into separate documents per line and JSON + files are split into separate documents per object. + :param str parent_document_id: (optional) Filters the documents to include + only child documents that were generated when the specified parent document + was processed. + :param str sha256: (optional) Filters the documents to include only + documents with the specified SHA-256 hash. Format the hash as a hexadecimal + string. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
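As a usage illustration of `list_documents`, the sketch below requests up to 100 documents and filters for failed ingestions that produced notices; the parameter values mirror the options documented above.

```
docs = discovery.list_documents(
    project_id=project_id,
    collection_id=collection_id,
    count=100,
    status='failed',
    has_notices=True,
).get_result()
```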
+ :rtype: DetailedResponse with `dict` result representing a `ListDocumentsResponse` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_documents', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'count': count, + 'status': status, + 'has_notices': has_notices, + 'is_parent': is_parent, + 'parent_document_id': parent_document_id, + 'sha256': sha256, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/documents'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def add_document( + self, + project_id: str, + collection_id: str, + *, + file: Optional[BinaryIO] = None, + filename: Optional[str] = None, + file_content_type: Optional[str] = None, + metadata: Optional[str] = None, + x_watson_discovery_force: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Add a document. + + Add a document to a collection with optional metadata. + Returns immediately after the system has accepted the document for processing. + Use this method to upload a file to the collection. You cannot use this method to + crawl an external data source. + * For a list of supported file types, see the [product + documentation](/docs/discovery-data?topic=discovery-data-collections#supportedfiletypes). + * You must provide document content, metadata, or both. If the request is missing + both document content and metadata, it is rejected. + * You can set the **Content-Type** parameter on the **file** part to indicate + the media type of the document. If the **Content-Type** parameter is missing or is + one of the generic media types (for example, `application/octet-stream`), then the + service attempts to automatically detect the document's media type. + * If the document is uploaded to a collection that shares its data with another + collection, the **X-Watson-Discovery-Force** header must be set to `true`. + * In curl requests only, you can assign an ID to a document that you add by + appending the ID to the endpoint + (`/v2/projects/{project_id}/collections/{collection_id}/documents/{document_id}`). + If a document already exists with the specified ID, it is replaced. + For more information about how certain file types and field names are handled when + a file is added to a collection, see the [product + documentation](/docs/discovery-data?topic=discovery-data-index-overview#field-name-limits). + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param BinaryIO file: (optional) **Add a document**: The content of the + document to ingest. 
For the supported file types and maximum supported file + size limits when adding a document, see [the + documentation](/docs/discovery-data?topic=discovery-data-collections#supportedfiletypes). + **Analyze a document**: The content of the document to analyze but not + ingest. Only the `application/json` content type is supported by the + Analyze API. For maximum supported file size limits, see [the product + documentation](/docs/discovery-data?topic=discovery-data-analyzeapi#analyzeapi-limits). + :param str filename: (optional) The filename for file. + :param str file_content_type: (optional) The content type of file. + :param str metadata: (optional) Add information about the file that you + want to include in the response. + The maximum supported metadata file size is 1 MB. Metadata parts larger + than 1 MB are rejected. + Example: + ``` + { + "filename": "favorites2.json", + "file_type": "json" + }. + :param bool x_watson_discovery_force: (optional) When `true`, the uploaded + document is added to the collection even if the data for that collection is + shared with other collections. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `DocumentAccepted` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = { + 'X-Watson-Discovery-Force': x_watson_discovery_force, + } + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='add_document', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + form_data = [] + if file: + if not filename and hasattr(file, 'name'): + filename = basename(file.name) + if not filename: + raise ValueError('filename must be provided') + form_data.append(('file', (filename, file, file_content_type or + 'application/octet-stream'))) + if metadata: + form_data.append(('metadata', (None, metadata, 'text/plain'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/documents'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response + + def get_document( + self, + project_id: str, + collection_id: str, + document_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get document details. + + Get details about a specific document, whether the document is added by uploading + a file or by crawling an external data source. + **Note**: This method is available only from Cloud Pak for Data version 4.0.9 and + later installed instances, and from IBM Cloud-managed instances. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param str document_id: The ID of the document. 
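A minimal sketch of `add_document`, uploading a local JSON file with a small metadata part; the filename and metadata contents are placeholders, and the `document_id` key is assumed to be part of the `DocumentAccepted` response.

```
import json

with open('sample-document.json', 'rb') as file:
    result = discovery.add_document(
        project_id=project_id,
        collection_id=collection_id,
        file=file,
        filename='sample-document.json',
        file_content_type='application/json',
        metadata=json.dumps({'source': 'sdk-example'}),  # placeholder metadata
    ).get_result()
document_id = result['document_id']  # assumed DocumentAccepted field
```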
+ :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `DocumentDetails` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + if not document_id: + raise ValueError('document_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_document', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id', 'document_id'] + path_param_values = self.encode_path_vars(project_id, collection_id, + document_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/documents/{document_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def update_document( + self, + project_id: str, + collection_id: str, + document_id: str, + *, + file: Optional[BinaryIO] = None, + filename: Optional[str] = None, + file_content_type: Optional[str] = None, + metadata: Optional[str] = None, + x_watson_discovery_force: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update a document. + + Replace an existing document or add a document with a specified document ID. + Starts ingesting a document with optional metadata. + Use this method to upload a file to a collection. You cannot use this method to + crawl an external data source. + If the document is uploaded to a collection that shares its data with another + collection, the **X-Watson-Discovery-Force** header must be set to `true`. + **Notes:** + * Uploading a new document with this method automatically replaces any existing + document stored with the same document ID. + * If an uploaded document is split into child documents during ingestion, all + existing child documents are overwritten, even if the updated version of the + document has fewer child documents. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param str document_id: The ID of the document. + :param BinaryIO file: (optional) **Add a document**: The content of the + document to ingest. For the supported file types and maximum supported file + size limits when adding a document, see [the + documentation](/docs/discovery-data?topic=discovery-data-collections#supportedfiletypes). + **Analyze a document**: The content of the document to analyze but not + ingest. Only the `application/json` content type is supported by the + Analyze API. For maximum supported file size limits, see [the product + documentation](/docs/discovery-data?topic=discovery-data-analyzeapi#analyzeapi-limits). + :param str filename: (optional) The filename for file. + :param str file_content_type: (optional) The content type of file. 
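`get_document` and `update_document` follow the same pattern; the sketch below re-uploads a revised file for the document added earlier, with placeholder filenames.

```
discovery.get_document(
    project_id=project_id,
    collection_id=collection_id,
    document_id=document_id,
)

# Replaces the stored document (and any child documents) with the new file.
with open('sample-document-v2.json', 'rb') as file:
    discovery.update_document(
        project_id=project_id,
        collection_id=collection_id,
        document_id=document_id,
        file=file,
        filename='sample-document-v2.json',
        file_content_type='application/json',
    )
```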
+ :param str metadata: (optional) Add information about the file that you + want to include in the response. + The maximum supported metadata file size is 1 MB. Metadata parts larger + than 1 MB are rejected. + Example: + ``` + { + "filename": "favorites2.json", + "file_type": "json" + }. + :param bool x_watson_discovery_force: (optional) When `true`, the uploaded + document is added to the collection even if the data for that collection is + shared with other collections. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `DocumentAccepted` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + if not document_id: + raise ValueError('document_id must be provided') + headers = { + 'X-Watson-Discovery-Force': x_watson_discovery_force, + } + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='update_document', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + form_data = [] + if file: + if not filename and hasattr(file, 'name'): + filename = basename(file.name) + if not filename: + raise ValueError('filename must be provided') + form_data.append(('file', (filename, file, file_content_type or + 'application/octet-stream'))) + if metadata: + form_data.append(('metadata', (None, metadata, 'text/plain'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id', 'document_id'] + path_param_values = self.encode_path_vars(project_id, collection_id, + document_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/documents/{document_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response + + def delete_document( + self, + project_id: str, + collection_id: str, + document_id: str, + *, + x_watson_discovery_force: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Delete a document. + + Deletes the document with the document ID that you specify from the collection. + Removes uploaded documents from the collection permanently. If you delete a + document that was added by crawling an external data source, the document will be + added again with the next scheduled crawl of the data source. The delete function + removes the document from the collection, not from the external data source. + **Note:** Files such as CSV or JSON files generate subdocuments when they are + added to a collection. If you delete a subdocument, and then repeat the action + that created it, the deleted document is added back in to your collection. To + remove subdocuments that are generated by an uploaded file, delete the original + document instead. You can get the document ID of the original document from the + `parent_document_id` of the subdocument result. + If the document with the given document ID exists, Watson Discovery first marks or + tags the document as deleted when it sends the 200 response code. 
At a later time + (within a couple of minutes unless the document has many child documents), it + removes the document from the collection. + There is no bulk document delete API. Documents must be deleted one at a time + using this API. However, you can delete a collection, and all the documents from + the collection are removed along with the collection. + The document will be deleted from the given collection only, not from the + corresponding data source. Wherever relevant, an incremental crawl will not bring + back the document into Watson Discovery from the data source. Only a full crawl + will retrieve the deleted document back from the data source provided it is still + present in the same data source. + Finally, if multiple collections share the same dataset, deleting a document from + a collection will remove it from that collection only (in other remaining + collections the document will still exist). The document will be removed from the + dataset, if this document is deleted from all the collections that share the same + dataset. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param str document_id: The ID of the document. + :param bool x_watson_discovery_force: (optional) When `true`, the uploaded + document is added to the collection even if the data for that collection is + shared with other collections. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `DeleteDocumentResponse` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + if not document_id: + raise ValueError('document_id must be provided') + headers = { + 'X-Watson-Discovery-Force': x_watson_discovery_force, + } + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_document', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id', 'document_id'] + path_param_values = self.encode_path_vars(project_id, collection_id, + document_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/documents/{document_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Queries + ######################### + + def query( + self, + project_id: str, + *, + collection_ids: Optional[List[str]] = None, + filter: Optional[str] = None, + query: Optional[str] = None, + natural_language_query: Optional[str] = None, + aggregation: Optional[str] = None, + count: Optional[int] = None, + return_: Optional[List[str]] = None, + offset: Optional[int] = None, + sort: Optional[str] = None, + highlight: Optional[bool] = None, + spelling_suggestions: Optional[bool] = None, + table_results: Optional['QueryLargeTableResults'] = None, + 
suggested_refinements: Optional[ + 'QueryLargeSuggestedRefinements'] = None, + passages: Optional['QueryLargePassages'] = None, + similar: Optional['QueryLargeSimilar'] = None, + **kwargs, + ) -> DetailedResponse: + """ + Query a project. + + Search your data by submitting queries that are written in natural language or + formatted in the Discovery Query Language. For more information, see the + [Discovery + documentation](/docs/discovery-data?topic=discovery-data-query-concepts). The + default query parameters differ by project type. For more information about the + project default settings, see the [Discovery + documentation](/docs/discovery-data?topic=discovery-data-query-defaults). See [the + Projects API documentation](#create-project) for details about how to set custom + default query settings. + The length of the UTF-8 encoding of the POST body cannot exceed 10,000 bytes, + which is roughly equivalent to 10,000 characters in English. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param List[str] collection_ids: (optional) A comma-separated list of + collection IDs to be queried against. + :param str filter: (optional) Searches for documents that match the + Discovery Query Language criteria that is specified as input. Filter calls + are cached and are faster than query calls because the results are not + ordered by relevance. When used with the **aggregation**, **query**, or + **natural_language_query** parameters, the **filter** parameter runs first. + This parameter is useful for limiting results to those that contain + specific metadata values. + :param str query: (optional) A query search that is written in the + Discovery Query Language and returns all matching documents in your data + set with full enrichments and full text, and with the most relevant + documents listed first. Use a query search when you want to find the most + relevant search results. You can use this parameter or the + **natural_language_query** parameter to specify the query input, but not + both. + :param str natural_language_query: (optional) A natural language query that + returns relevant documents by using training data and natural language + understanding. You can use this parameter or the **query** parameter to + specify the query input, but not both. To filter the results based on + criteria you specify, include the **filter** parameter in the request. + :param str aggregation: (optional) An aggregation search that returns an + exact answer by combining query search with filters. Useful for + applications to build lists, tables, and time series. For more information + about the supported types of aggregations, see the [Discovery + documentation](/docs/discovery-data?topic=discovery-data-query-aggregations). + :param int count: (optional) Number of results to return. + :param List[str] return_: (optional) A list of the fields in the document + hierarchy to return. You can specify both root-level (`text`) and nested + (`extracted_metadata.filename`) fields. If this parameter is an empty list, + then all fields are returned. + :param int offset: (optional) The number of query results to skip at the + beginning. Consider that the `count` is set to 10 (the default value) and + the total number of results that are returned is 100. 
In this case, the + following examples show the returned results for different `offset` values: + * If `offset` is set to 95, it returns the last 5 results. + * If `offset` is set to 10, it returns the second batch of 10 results. + * If `offset` is set to 100 or more, it returns empty results. + :param str sort: (optional) A comma-separated list of fields in the + document to sort on. You can optionally specify a sort direction by + prefixing the field with `-` for descending or `+` for ascending. Ascending + is the default sort direction if no prefix is specified. + :param bool highlight: (optional) When `true`, a highlight field is + returned for each result that contains fields that match the query. The + matching query terms are emphasized with surrounding `` tags. This + parameter is ignored if **passages.enabled** and **passages.per_document** + are `true`, in which case passages are returned for each document instead + of highlights. + :param bool spelling_suggestions: (optional) When `true` and the + **natural_language_query** parameter is used, the + **natural_language_query** parameter is spell checked. The most likely + correction is returned in the **suggested_query** field of the response (if + one exists). + :param QueryLargeTableResults table_results: (optional) Configuration for + table retrieval. + :param QueryLargeSuggestedRefinements suggested_refinements: (optional) + Configuration for suggested refinements. + **Note**: The **suggested_refinements** parameter that identified dynamic + facets from the data is deprecated. + :param QueryLargePassages passages: (optional) Configuration for passage + retrieval. + :param QueryLargeSimilar similar: (optional) Finds results from documents + that are similar to documents of interest. Use this parameter to add a + *More like these* function to your search. You can include this parameter + with or without a **query**, **filter** or **natural_language_query** + parameter. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
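As a brief illustration of the `delete_document` operation defined above (the document is first marked as deleted and then removed from the collection shortly afterwards), a minimal sketch with the placeholder IDs from earlier:

```
discovery.delete_document(
    project_id=project_id,
    collection_id=collection_id,
    document_id=document_id,
)
```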
+ :rtype: DetailedResponse with `dict` result representing a `QueryResponse` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if table_results is not None: + table_results = convert_model(table_results) + if suggested_refinements is not None: + suggested_refinements = convert_model(suggested_refinements) + if passages is not None: + passages = convert_model(passages) + if similar is not None: + similar = convert_model(similar) + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='query', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'collection_ids': collection_ids, + 'filter': filter, + 'query': query, + 'natural_language_query': natural_language_query, + 'aggregation': aggregation, + 'count': count, + 'return': return_, + 'offset': offset, + 'sort': sort, + 'highlight': highlight, + 'spelling_suggestions': spelling_suggestions, + 'table_results': table_results, + 'suggested_refinements': suggested_refinements, + 'passages': passages, + 'similar': similar, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/query'.format(**path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def get_autocompletion( + self, + project_id: str, + prefix: str, + *, + collection_ids: Optional[List[str]] = None, + field: Optional[str] = None, + count: Optional[int] = None, + **kwargs, + ) -> DetailedResponse: + """ + Get Autocomplete Suggestions. + + Returns completion query suggestions for the specified prefix. + Suggested words are based on terms from the project documents. Suggestions are not + based on terms from the project's search history, and the project does not learn + from previous user choices. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str prefix: The prefix to use for autocompletion. For example, the + prefix `Ho` could autocomplete to `hot`, `housing`, or `how`. + :param List[str] collection_ids: (optional) Comma separated list of the + collection IDs. If this parameter is not specified, all collections in the + project are used. + :param str field: (optional) The field in the result documents that + autocompletion suggestions are identified from. + :param int count: (optional) The number of autocompletion suggestions to + return. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
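A hedged sketch of `query`, combining a natural language query with a filter, a result count, and highlighting; the query text is a placeholder and the filter expression uses a hypothetical enriched-text field that is not taken from this change.

```
query_results = discovery.query(
    project_id=project_id,
    collection_ids=[collection_id],
    natural_language_query='What changed in the latest release?',  # placeholder
    filter='enriched_text.entities.text:"IBM"',  # hypothetical DQL filter field
    count=5,
    highlight=True,
).get_result()
```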
+ :rtype: DetailedResponse with `dict` result representing a `Completions` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not prefix: + raise ValueError('prefix must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_autocompletion', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'prefix': prefix, + 'collection_ids': convert_list(collection_ids), + 'field': field, + 'count': count, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/autocompletion'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def query_collection_notices( + self, + project_id: str, + collection_id: str, + *, + filter: Optional[str] = None, + query: Optional[str] = None, + natural_language_query: Optional[str] = None, + count: Optional[int] = None, + offset: Optional[int] = None, + **kwargs, + ) -> DetailedResponse: + """ + Query collection notices. + + Finds collection-level notices (errors and warnings) that are generated when + documents are ingested. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param str filter: (optional) Searches for documents that match the + Discovery Query Language criteria that is specified as input. Filter calls + are cached and are faster than query calls because the results are not + ordered by relevance. When used with the `aggregation`, `query`, or + `natural_language_query` parameters, the `filter` parameter runs first. + This parameter is useful for limiting results to those that contain + specific metadata values. + :param str query: (optional) A query search that is written in the + Discovery Query Language and returns all matching documents in your data + set with full enrichments and full text, and with the most relevant + documents listed first. You can use this parameter or the + **natural_language_query** parameter to specify the query input, but not + both. + :param str natural_language_query: (optional) A natural language query that + returns relevant documents by using natural language understanding. You can + use this parameter or the **query** parameter to specify the query input, + but not both. To filter the results based on criteria you specify, include + the **filter** parameter in the request. + :param int count: (optional) Number of results to return. The maximum for + the **count** and **offset** values together in any one query is + **10,000**. + :param int offset: (optional) The number of query results to skip at the + beginning. For example, if the total number of results that are returned is + 10 and the offset is 8, it returns the last two results. The maximum for + the **count** and **offset** values together in any one query is **10000**. 
+ :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `QueryNoticesResponse` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='query_collection_notices', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'filter': filter, + 'query': query, + 'natural_language_query': natural_language_query, + 'count': count, + 'offset': offset, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/notices'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def query_notices( + self, + project_id: str, + *, + filter: Optional[str] = None, + query: Optional[str] = None, + natural_language_query: Optional[str] = None, + count: Optional[int] = None, + offset: Optional[int] = None, + **kwargs, + ) -> DetailedResponse: + """ + Query project notices. + + Finds project-level notices (errors and warnings). Currently, project-level + notices are generated by relevancy training. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str filter: (optional) Searches for documents that match the + Discovery Query Language criteria that is specified as input. Filter calls + are cached and are faster than query calls because the results are not + ordered by relevance. When used with the `aggregation`, `query`, or + `natural_language_query` parameters, the `filter` parameter runs first. + This parameter is useful for limiting results to those that contain + specific metadata values. + :param str query: (optional) A query search that is written in the + Discovery Query Language and returns all matching documents in your data + set with full enrichments and full text, and with the most relevant + documents listed first. You can use this parameter or the + **natural_language_query** parameter to specify the query input, but not + both. + :param str natural_language_query: (optional) A natural language query that + returns relevant documents by using natural language understanding. You can + use this parameter or the **query** parameter to specify the query input, + but not both. To filter the results based on criteria you specify, include + the **filter** parameter in the request. + :param int count: (optional) Number of results to return. The maximum for + the **count** and **offset** values together in any one query is + **10,000**. + :param int offset: (optional) The number of query results to skip at the + beginning. For example, if the total number of results that are returned is + 10 and the offset is 8, it returns the last two results. 
The maximum for + the **count** and **offset** values together in any one query is **10000**. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `QueryNoticesResponse` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='query_notices', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'filter': filter, + 'query': query, + 'natural_language_query': natural_language_query, + 'count': count, + 'offset': offset, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/notices'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Query modifications + ######################### + + def get_stopword_list( + self, + project_id: str, + collection_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get a custom stop words list. + + Returns the custom stop words list that is used by the collection. For information + about the default stop words lists that are applied to queries, see [the product + documentation](/docs/discovery-data?topic=discovery-data-stopwords). + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `StopWordList` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_stopword_list', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/stopwords'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def create_stopword_list( + self, + project_id: str, + collection_id: str, + *, + stopwords: Optional[List[str]] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create a custom stop words list. + + Adds a list of custom stop words. 
Stop words are words that you want the service + to ignore when they occur in a query because they're not useful in distinguishing + the semantic meaning of the query. The stop words list cannot contain more than 1 + million characters. + A default stop words list is used by all collections. The default list is applied + both at indexing time and at query time. A custom stop words list that you add is + used at query time only. + The custom stop words list augments the default stop words list; you cannot remove + stop words. For information about the default stop words lists per language, see + [the product documentation](/docs/discovery-data?topic=discovery-data-stopwords). + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param List[str] stopwords: (optional) List of stop words. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `StopWordList` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_stopword_list', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'stopwords': stopwords, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/stopwords'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def delete_stopword_list( + self, + project_id: str, + collection_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete a custom stop words list. + + Deletes a custom stop words list to stop using it in queries against the + collection. After a custom stop words list is deleted, the default stop words list + is used. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
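+
+ Example (a minimal sketch; assumes `discovery` is an authenticated
+ `DiscoveryV2` instance and the IDs are placeholders):
+
+    # Remove the custom list; the default stop words list is used for
+    # subsequent queries.
+    response = discovery.delete_stopword_list(
+        project_id='{project_id}',
+        collection_id='{collection_id}',
+    )
+    print(response.get_status_code())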
+ :rtype: DetailedResponse + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_stopword_list', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/stopwords'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def list_expansions( + self, + project_id: str, + collection_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get the expansion list. + + Returns the current expansion list for the specified collection. If an expansion + list is not specified, an empty expansions array is returned. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Expansions` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_expansions', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/expansions'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def create_expansions( + self, + project_id: str, + collection_id: str, + expansions: List['Expansion'], + **kwargs, + ) -> DetailedResponse: + """ + Create or update an expansion list. + + Creates or replaces the expansion list for this collection. An expansion list + introduces alternative wording for key terms that are mentioned in your + collection. By identifying synonyms or common misspellings, you expand the scope + of a query beyond exact matches. The maximum number of expanded terms allowed per + collection is 5,000. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. 
+ :param List[Expansion] expansions: An array of query expansion definitions. + Each object in the **expansions** array represents a term or set of terms + that will be expanded into other terms. Each expansion object can be + configured as `bidirectional` or `unidirectional`. + * **Bidirectional**: Each entry in the `expanded_terms` list expands to + include all expanded terms. For example, a query for `ibm` expands to `ibm + OR international business machines OR big blue`. + * **Unidirectional**: The terms in `input_terms` in the query are replaced + by the terms in `expanded_terms`. For example, a query for the often + misused term `on premise` is converted to `on premises OR on-premises` and + does not contain the original term. If you want an input term to be + included in the query, then repeat the input term in the expanded terms + list. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Expansions` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + if expansions is None: + raise ValueError('expansions must be provided') + expansions = [convert_model(x) for x in expansions] + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_expansions', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'expansions': expansions, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/expansions'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def delete_expansions( + self, + project_id: str, + collection_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete the expansion list. + + Removes the expansion information for this collection. To disable query expansion + for a collection, delete the expansion list. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
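+
+ Example (a minimal sketch; assumes `discovery` is an authenticated
+ `DiscoveryV2` instance and the IDs are placeholders):
+
+    # Delete the expansion list to turn off query expansion for this
+    # collection.
+    response = discovery.delete_expansions(
+        project_id='{project_id}',
+        collection_id='{collection_id}',
+    )
+    print(response.get_status_code())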
+ :rtype: DetailedResponse + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_expansions', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/expansions'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Component settings + ######################### + + def get_component_settings( + self, + project_id: str, + **kwargs, + ) -> DetailedResponse: + """ + List component settings. + + Returns default configuration settings for components. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `ComponentSettingsResponse` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_component_settings', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/component_settings'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Training data + ######################### + + def list_training_queries( + self, + project_id: str, + **kwargs, + ) -> DetailedResponse: + """ + List training queries. + + List the training queries for the specified project. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
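+
+ Example (an illustrative sketch; assumes `discovery` is an
+ authenticated `DiscoveryV2` instance and the project ID is a
+ placeholder):
+
+    # Print the natural language text of each training query.
+    response = discovery.list_training_queries(
+        project_id='{project_id}',
+    ).get_result()
+    for training_query in response.get('queries', []):
+        print(training_query.get('natural_language_query'))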
+ :rtype: DetailedResponse with `dict` result representing a `TrainingQuerySet` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_training_queries', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/training_data/queries'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def delete_training_queries( + self, + project_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete training queries. + + Removes all training queries for the specified project. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if not project_id: + raise ValueError('project_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_training_queries', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/training_data/queries'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def create_training_query( + self, + project_id: str, + natural_language_query: str, + examples: List['TrainingExample'], + *, + filter: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create a training query. + + Add a query to the training data for this project. The query can contain a filter + and natural language query. + **Note**: You cannot apply relevancy training to a `content_mining` project type. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str natural_language_query: The natural text query that is used as + the training query. + :param List[TrainingExample] examples: Array of training examples. + :param str filter: (optional) The filter used on the collection before the + **natural_language_query** is applied. Only specify a filter if the + documents that you consider to be most relevant are not included in the top + 100 results when you submit test queries. If you specify a filter during + training, apply the same filter to queries that are submitted at runtime + for optimal ranking results. 
+ :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `TrainingQuery` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if natural_language_query is None: + raise ValueError('natural_language_query must be provided') + if examples is None: + raise ValueError('examples must be provided') + examples = [convert_model(x) for x in examples] + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_training_query', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'natural_language_query': natural_language_query, + 'examples': examples, + 'filter': filter, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/training_data/queries'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def get_training_query( + self, + project_id: str, + query_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get a training data query. + + Get details for a specific training data query, including the query string and all + examples. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str query_id: The ID of the query used for training. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `TrainingQuery` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not query_id: + raise ValueError('query_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_training_query', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'query_id'] + path_param_values = self.encode_path_vars(project_id, query_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/training_data/queries/{query_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def update_training_query( + self, + project_id: str, + query_id: str, + natural_language_query: str, + examples: List['TrainingExample'], + *, + filter: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update a training query. + + Updates an existing training query and its examples. 
You must resubmit all of the + examples with the update request. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str query_id: The ID of the query used for training. + :param str natural_language_query: The natural text query that is used as + the training query. + :param List[TrainingExample] examples: Array of training examples. + :param str filter: (optional) The filter used on the collection before the + **natural_language_query** is applied. Only specify a filter if the + documents that you consider to be most relevant are not included in the top + 100 results when you submit test queries. If you specify a filter during + training, apply the same filter to queries that are submitted at runtime + for optimal ranking results. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `TrainingQuery` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not query_id: + raise ValueError('query_id must be provided') + if natural_language_query is None: + raise ValueError('natural_language_query must be provided') + if examples is None: + raise ValueError('examples must be provided') + examples = [convert_model(x) for x in examples] + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='update_training_query', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'natural_language_query': natural_language_query, + 'examples': examples, + 'filter': filter, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'query_id'] + path_param_values = self.encode_path_vars(project_id, query_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/training_data/queries/{query_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def delete_training_query( + self, + project_id: str, + query_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete a training data query. + + Removes details from a training data query, including the query string and all + examples. + To delete an example, use the *Update a training query* method and omit the + example that you want to delete from the example set. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str query_id: The ID of the query used for training. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
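+
+ Example (a minimal sketch; assumes `discovery` is an authenticated
+ `DiscoveryV2` instance and the IDs are placeholders):
+
+    # Remove a single training query, including all of its examples.
+    response = discovery.delete_training_query(
+        project_id='{project_id}',
+        query_id='{query_id}',
+    )
+    print(response.get_status_code())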
+ :rtype: DetailedResponse + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not query_id: + raise ValueError('query_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_training_query', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['project_id', 'query_id'] + path_param_values = self.encode_path_vars(project_id, query_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/training_data/queries/{query_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Enrichments + ######################### + + def list_enrichments( + self, + project_id: str, + **kwargs, + ) -> DetailedResponse: + """ + List enrichments. + + Lists the enrichments available to this project. The *Part of Speech* and + *Sentiment of Phrases* enrichments might be listed, but are reserved for internal + use only. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Enrichments` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_enrichments', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/enrichments'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def create_enrichment( + self, + project_id: str, + enrichment: 'CreateEnrichment', + *, + file: Optional[BinaryIO] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create an enrichment. + + Create an enrichment for use with the specified project. To apply the enrichment + to a collection in the project, use the [Collections + API](/apidocs/discovery-data#createcollection). + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param CreateEnrichment enrichment: Information about a specific + enrichment. + :param BinaryIO file: (optional) The enrichment file to upload. Expected + file types per enrichment are as follows: + * CSV for `dictionary` and `sentence_classifier` (the training data CSV + file to upload). + * PEAR for `uima_annotator` and `rule_based` (Explorer) + * ZIP for `watson_knowledge_studio_model` and `rule_based` (Studio Advanced + Rule Editor). 
+ :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Enrichment` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if enrichment is None: + raise ValueError('enrichment must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_enrichment', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + form_data = [] + form_data.append( + ('enrichment', (None, json.dumps(enrichment), 'application/json'))) + if file: + form_data.append(('file', (None, file, 'application/octet-stream'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/enrichments'.format(**path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response + + def get_enrichment( + self, + project_id: str, + enrichment_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get enrichment. + + Get details about a specific enrichment. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str enrichment_id: The Universally Unique Identifier (UUID) of the + enrichment. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Enrichment` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not enrichment_id: + raise ValueError('enrichment_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_enrichment', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'enrichment_id'] + path_param_values = self.encode_path_vars(project_id, enrichment_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/enrichments/{enrichment_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def update_enrichment( + self, + project_id: str, + enrichment_id: str, + name: str, + *, + description: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update an enrichment. + + Updates an existing enrichment's name and description. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str enrichment_id: The Universally Unique Identifier (UUID) of the + enrichment. + :param str name: A new name for the enrichment. 
+ :param str description: (optional) A new description for the enrichment. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Enrichment` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not enrichment_id: + raise ValueError('enrichment_id must be provided') + if name is None: + raise ValueError('name must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='update_enrichment', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'name': name, + 'description': description, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'enrichment_id'] + path_param_values = self.encode_path_vars(project_id, enrichment_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/enrichments/{enrichment_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def delete_enrichment( + self, + project_id: str, + enrichment_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete an enrichment. + + Deletes an existing enrichment from the specified project. + **Note:** Only enrichments that have been manually created can be deleted. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str enrichment_id: The Universally Unique Identifier (UUID) of the + enrichment. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not enrichment_id: + raise ValueError('enrichment_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_enrichment', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['project_id', 'enrichment_id'] + path_param_values = self.encode_path_vars(project_id, enrichment_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/enrichments/{enrichment_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Batches + ######################### + + def list_batches( + self, + project_id: str, + collection_id: str, + **kwargs, + ) -> DetailedResponse: + """ + List batches. + + A batch is a set of documents that are ready for enrichment by an external + application. 
After you apply a webhook enrichment to a collection, and then + process or upload documents to the collection, Discovery creates a batch with a + unique **batch_id**. + To start, you must register your external application as a **webhook** type by + using the [Create enrichment API](/apidocs/discovery-data#createenrichment) + method. + Use the List batches API to get the following: + * Notified batches that are not yet pulled by the external enrichment + application. + * Batches that are pulled, but not yet pushed to Discovery by the external + enrichment application. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `ListBatchesResponse` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_batches', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/batches'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def pull_batches( + self, + project_id: str, + collection_id: str, + batch_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Pull batches. + + Pull a batch of documents from Discovery for enrichment by an external + application. Ensure to include the `Accept-Encoding: gzip` header in this method + to get the file. You can also implement retry logic when calling this method to + avoid any network errors. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param str batch_id: The Universally Unique Identifier (UUID) of the + document batch that is being requested from Discovery. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
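+
+ Example (an illustrative sketch; assumes `discovery` is an
+ authenticated `DiscoveryV2` instance and the IDs are placeholders;
+ the `Accept-Encoding: gzip` header is passed through the optional
+ request headers):
+
+    response = discovery.pull_batches(
+        project_id='{project_id}',
+        collection_id='{collection_id}',
+        batch_id='{batch_id}',
+        headers={'Accept-Encoding': 'gzip'},
+    )
+    batch = response.get_result()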
+ :rtype: DetailedResponse with `dict` result representing a `PullBatchesResponse` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + if not batch_id: + raise ValueError('batch_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='pull_batches', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id', 'batch_id'] + path_param_values = self.encode_path_vars(project_id, collection_id, + batch_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/batches/{batch_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def push_batches( + self, + project_id: str, + collection_id: str, + batch_id: str, + *, + file: Optional[BinaryIO] = None, + filename: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Push batches. + + Push a batch of documents to Discovery after annotation by an external + application. You can implement retry logic when calling this method to avoid any + network errors. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param str batch_id: The Universally Unique Identifier (UUID) of the + document batch that is being requested from Discovery. + :param BinaryIO file: (optional) A compressed newline-delimited JSON + (NDJSON), which is a JSON file with one row of data per line. For example, + `{batch_id}.ndjson.gz`. For more information, see [Binary attachment in the + push batches + method](/docs/discovery-data?topic=discovery-data-external-enrichment#binary-attachment-push-batches). + There is no limitation on the name of the file because Discovery does not + use the name for processing. The list of features in the document is + specified in the `features` object. + :param str filename: (optional) The filename for file. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
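+
+ Example (an illustrative sketch; assumes `discovery` is an
+ authenticated `DiscoveryV2` instance, the IDs are placeholders, and
+ `batch.ndjson.gz` is the annotated batch file produced by the
+ external enrichment application):
+
+    with open('batch.ndjson.gz', 'rb') as ndjson_file:
+        response = discovery.push_batches(
+            project_id='{project_id}',
+            collection_id='{collection_id}',
+            batch_id='{batch_id}',
+            file=ndjson_file,
+            filename='batch.ndjson.gz',
+        ).get_result()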
+ :rtype: DetailedResponse with `bool` result + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + if not batch_id: + raise ValueError('batch_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='push_batches', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + form_data = [] + if file: + if not filename and hasattr(file, 'name'): + filename = basename(file.name) + if not filename: + raise ValueError('filename must be provided') + form_data.append( + ('file', (filename, file, 'application/octet-stream'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id', 'batch_id'] + path_param_values = self.encode_path_vars(project_id, collection_id, + batch_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/batches/{batch_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Document classifiers + ######################### + + def list_document_classifiers( + self, + project_id: str, + **kwargs, + ) -> DetailedResponse: + """ + List document classifiers. + + Get a list of the document classifiers in a project. Returns only the name and + classifier ID of each document classifier. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `DocumentClassifiers` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_document_classifiers', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/document_classifiers'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def create_document_classifier( + self, + project_id: str, + training_data: BinaryIO, + classifier: 'CreateDocumentClassifier', + *, + test_data: Optional[BinaryIO] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create a document classifier. + + Create a document classifier. You can use the API to create a document classifier + in any project type. 
After you create a document classifier, you can use the + Enrichments API to create a classifier enrichment, and then the Collections API to + apply the enrichment to a collection in the project. + **Note:** This method is supported on installed instances (IBM Cloud Pak for Data) + or IBM Cloud-managed Premium or Enterprise plan instances. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param BinaryIO training_data: The training data CSV file to upload. The + CSV file must have headers. The file must include a field that contains the + text you want to classify and a field that contains the classification + labels that you want to use to classify your data. If you want to specify + multiple values in a single field, use a semicolon as the value separator. + For a sample file, see [the product + documentation](/docs/discovery-data?topic=discovery-data-cm-doc-classifier). + :param CreateDocumentClassifier classifier: An object that manages the + settings and data that is required to train a document classification + model. + :param BinaryIO test_data: (optional) The CSV with test data to upload. The + column values in the test file must be the same as the column values in the + training data file. If no test data is provided, the training data is split + into two separate groups of training and test data. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `DocumentClassifier` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if training_data is None: + raise ValueError('training_data must be provided') + if classifier is None: + raise ValueError('classifier must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_document_classifier', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + form_data = [] + form_data.append(('training_data', (None, training_data, 'text/csv'))) + form_data.append( + ('classifier', (None, json.dumps(classifier), 'application/json'))) + if test_data: + form_data.append(('test_data', (None, test_data, 'text/csv'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id'] + path_param_values = self.encode_path_vars(project_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/document_classifiers'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response + + def get_document_classifier( + self, + project_id: str, + classifier_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get a document classifier. + + Get details about a specific document classifier. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str classifier_id: The Universally Unique Identifier (UUID) of the + classifier. 
+ :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `DocumentClassifier` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not classifier_id: + raise ValueError('classifier_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_document_classifier', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'classifier_id'] + path_param_values = self.encode_path_vars(project_id, classifier_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/document_classifiers/{classifier_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def update_document_classifier( + self, + project_id: str, + classifier_id: str, + classifier: 'UpdateDocumentClassifier', + *, + training_data: Optional[BinaryIO] = None, + test_data: Optional[BinaryIO] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update a document classifier. + + Update the document classifier name or description, update the training data, or + add or update the test data. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str classifier_id: The Universally Unique Identifier (UUID) of the + classifier. + :param UpdateDocumentClassifier classifier: An object that contains a new + name or description for a document classifier, updated training data, or + new or updated test data. + :param BinaryIO training_data: (optional) The training data CSV file to + upload. The CSV file must have headers. The file must include a field that + contains the text you want to classify and a field that contains the + classification labels that you want to use to classify your data. If you + want to specify multiple values in a single column, use a semicolon as the + value separator. For a sample file, see [the product + documentation](/docs/discovery-data?topic=discovery-data-cm-doc-classifier). + :param BinaryIO test_data: (optional) The CSV with test data to upload. The + column values in the test file must be the same as the column values in the + training data file. If no test data is provided, the training data is split + into two separate groups of training and test data. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
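+
+ Example (an illustrative sketch; assumes `discovery` is an
+ authenticated `DiscoveryV2` instance and the IDs are placeholders;
+ for brevity the classifier settings are shown as a plain `dict`
+ with the same shape as the `UpdateDocumentClassifier` model):
+
+    response = discovery.update_document_classifier(
+        project_id='{project_id}',
+        classifier_id='{classifier_id}',
+        classifier={'name': 'Renamed classifier'},
+    ).get_result()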
+ :rtype: DetailedResponse with `dict` result representing a `DocumentClassifier` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not classifier_id: + raise ValueError('classifier_id must be provided') + if classifier is None: + raise ValueError('classifier must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='update_document_classifier', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + form_data = [] + form_data.append( + ('classifier', (None, json.dumps(classifier), 'application/json'))) + if training_data: + form_data.append( + ('training_data', (None, training_data, 'text/csv'))) + if test_data: + form_data.append(('test_data', (None, test_data, 'text/csv'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'classifier_id'] + path_param_values = self.encode_path_vars(project_id, classifier_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/document_classifiers/{classifier_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response + + def delete_document_classifier( + self, + project_id: str, + classifier_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete a document classifier. + + Deletes an existing document classifier from the specified project. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str classifier_id: The Universally Unique Identifier (UUID) of the + classifier. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not classifier_id: + raise ValueError('classifier_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_document_classifier', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['project_id', 'classifier_id'] + path_param_values = self.encode_path_vars(project_id, classifier_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/document_classifiers/{classifier_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Document classifier models + ######################### + + def list_document_classifier_models( + self, + project_id: str, + classifier_id: str, + **kwargs, + ) -> DetailedResponse: + """ + List document classifier models. + + Get a list of the document classifier models in a project. Returns only the name + and model ID of each document classifier model. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. 
This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str classifier_id: The Universally Unique Identifier (UUID) of the + classifier. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `DocumentClassifierModels` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not classifier_id: + raise ValueError('classifier_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='list_document_classifier_models', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'classifier_id'] + path_param_values = self.encode_path_vars(project_id, classifier_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/document_classifiers/{classifier_id}/models'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def create_document_classifier_model( + self, + project_id: str, + classifier_id: str, + name: str, + *, + description: Optional[str] = None, + learning_rate: Optional[float] = None, + l1_regularization_strengths: Optional[List[float]] = None, + l2_regularization_strengths: Optional[List[float]] = None, + training_max_steps: Optional[int] = None, + improvement_ratio: Optional[float] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create a document classifier model. + + Create a document classifier model by training a model that uses the data and + classifier settings defined in the specified document classifier. + **Note:** This method is supported on installed intances (IBM Cloud Pak for Data) + or IBM Cloud-managed Premium or Enterprise plan instances. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str classifier_id: The Universally Unique Identifier (UUID) of the + classifier. + :param str name: The name of the document classifier model. + :param str description: (optional) A description of the document classifier + model. + :param float learning_rate: (optional) A tuning parameter in an + optimization algorithm that determines the step size at each iteration of + the training process. It influences how much of any newly acquired + information overrides the existing information, and therefore is said to + represent the speed at which a machine learning model learns. The default + value is `0.1`. + :param List[float] l1_regularization_strengths: (optional) Avoids + overfitting by shrinking the coefficient of less important features to + zero, which removes some features altogether. You can specify many values + for hyper-parameter optimization. The default value is `[0.000001]`. + :param List[float] l2_regularization_strengths: (optional) A method you can + apply to avoid overfitting your model on the training data. You can specify + many values for hyper-parameter optimization. The default value is + `[0.000001]`. 
+ :param int training_max_steps: (optional) Maximum number of training steps + to complete. This setting is useful if you need the training process to + finish in a specific time frame to fit into an automated process. The + default value is ten million. + :param float improvement_ratio: (optional) Stops the training run early if + the improvement ratio is not met by the time the process reaches a certain + point. The default value is `0.00001`. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `DocumentClassifierModel` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not classifier_id: + raise ValueError('classifier_id must be provided') + if name is None: + raise ValueError('name must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='create_document_classifier_model', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'name': name, + 'description': description, + 'learning_rate': learning_rate, + 'l1_regularization_strengths': l1_regularization_strengths, + 'l2_regularization_strengths': l2_regularization_strengths, + 'training_max_steps': training_max_steps, + 'improvement_ratio': improvement_ratio, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'classifier_id'] + path_param_values = self.encode_path_vars(project_id, classifier_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/document_classifiers/{classifier_id}/models'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def get_document_classifier_model( + self, + project_id: str, + classifier_id: str, + model_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get a document classifier model. + + Get details about a specific document classifier model. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str classifier_id: The Universally Unique Identifier (UUID) of the + classifier. + :param str model_id: The Universally Unique Identifier (UUID) of the + classifier model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `DocumentClassifierModel` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not classifier_id: + raise ValueError('classifier_id must be provided') + if not model_id: + raise ValueError('model_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='get_document_classifier_model', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'classifier_id', 'model_id'] + path_param_values = self.encode_path_vars(project_id, classifier_id, + model_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/document_classifiers/{classifier_id}/models/{model_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def update_document_classifier_model( + self, + project_id: str, + classifier_id: str, + model_id: str, + *, + name: Optional[str] = None, + description: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update a document classifier model. + + Update the document classifier model name or description. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str classifier_id: The Universally Unique Identifier (UUID) of the + classifier. + :param str model_id: The Universally Unique Identifier (UUID) of the + classifier model. + :param str name: (optional) A new name for the enrichment. + :param str description: (optional) A new description for the enrichment. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `DocumentClassifierModel` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not classifier_id: + raise ValueError('classifier_id must be provided') + if not model_id: + raise ValueError('model_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='update_document_classifier_model', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + data = { + 'name': name, + 'description': description, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'classifier_id', 'model_id'] + path_param_values = self.encode_path_vars(project_id, classifier_id, + model_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/document_classifiers/{classifier_id}/models/{model_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def delete_document_classifier_model( + self, + project_id: str, + classifier_id: str, + model_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete a document classifier model. + + Deletes an existing document classifier model from the specified project. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str classifier_id: The Universally Unique Identifier (UUID) of the + classifier. + :param str model_id: The Universally Unique Identifier (UUID) of the + classifier model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not classifier_id: + raise ValueError('classifier_id must be provided') + if not model_id: + raise ValueError('model_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_document_classifier_model', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['project_id', 'classifier_id', 'model_id'] + path_param_values = self.encode_path_vars(project_id, classifier_id, + model_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/document_classifiers/{classifier_id}/models/{model_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Analyze + ######################### + + def analyze_document( + self, + project_id: str, + collection_id: str, + *, + file: Optional[BinaryIO] = None, + filename: Optional[str] = None, + file_content_type: Optional[str] = None, + metadata: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Analyze a document. + + Process a document and return it for realtime use. Supports JSON files only. + The file is not stored in the collection, but is processed according to the + collection's configuration settings. To get results, enrichments must be applied + to a field in the collection that also exists in the file that you want to + analyze. For example, to analyze text in a `Quote` field, you must apply + enrichments to the `Quote` field in the collection configuration. Then, when you + analyze the file, the text in the `Quote` field is analyzed and results are + written to a field named `enriched_Quote`. + Submit a request against only one collection at a time. Remember, the documents in + the collection are not significant. It is the enrichments that are defined for the + collection that matter. If you submit requests to several collections, then + several models are initiated at the same time, which can cause request failures. + **Note:** This method is supported with Enterprise plan deployments and installed + deployments only. + + :param str project_id: The Universally Unique Identifier (UUID) of the + project. This information can be found from the *Integrate and Deploy* page + in Discovery. + :param str collection_id: The Universally Unique Identifier (UUID) of the + collection. + :param BinaryIO file: (optional) **Add a document**: The content of the + document to ingest. For the supported file types and maximum supported file + size limits when adding a document, see [the + documentation](/docs/discovery-data?topic=discovery-data-collections#supportedfiletypes). + **Analyze a document**: The content of the document to analyze but not + ingest. Only the `application/json` content type is supported by the + Analyze API. For maximum supported file size limits, see [the product + documentation](/docs/discovery-data?topic=discovery-data-analyzeapi#analyzeapi-limits). + :param str filename: (optional) The filename for file. + :param str file_content_type: (optional) The content type of file. 
+ :param str metadata: (optional) Add information about the file that you + want to include in the response. + The maximum supported metadata file size is 1 MB. Metadata parts larger + than 1 MB are rejected. + Example: + ``` + { + "filename": "favorites2.json", + "file_type": "json" + }. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `AnalyzedDocument` object + """ + + if not project_id: + raise ValueError('project_id must be provided') + if not collection_id: + raise ValueError('collection_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='analyze_document', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + form_data = [] + if file: + if not filename and hasattr(file, 'name'): + filename = basename(file.name) + if not filename: + raise ValueError('filename must be provided') + form_data.append(('file', (filename, file, file_content_type or + 'application/octet-stream'))) + if metadata: + form_data.append(('metadata', (None, metadata, 'text/plain'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['project_id', 'collection_id'] + path_param_values = self.encode_path_vars(project_id, collection_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v2/projects/{project_id}/collections/{collection_id}/analyze'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # User data + ######################### + + def delete_user_data( + self, + customer_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete labeled data. + + Deletes all data associated with a specified customer ID. The method has no effect + if no data is associated with the customer ID. + You associate a customer ID with data by passing the **X-Watson-Metadata** header + with a request that passes data. For more information about personal data and + customer IDs, see [Information + security](/docs/discovery-data?topic=discovery-data-information-security#information-security). + **Note:** This method is only supported on IBM Cloud instances of Discovery. + + :param str customer_id: The customer ID for which all data is to be + deleted. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if not customer_id: + raise ValueError('customer_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V2', + operation_id='delete_user_data', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + 'customer_id': customer_id, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + url = '/v2/user_data' + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + +class AddDocumentEnums: + """ + Enums for add_document parameters. 
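+
+ A minimal usage sketch (assumes an authenticated `DiscoveryV2` client named
+ `discovery`, placeholder project and collection IDs, and the `add_document`
+ operation that these enums describe, defined earlier in this service class):
+ ```
+ with open('report.pdf', 'rb') as report:
+     result = discovery.add_document(
+         project_id='{project_id}',
+         collection_id='{collection_id}',
+         file=report,
+         filename='report.pdf',
+         file_content_type=AddDocumentEnums.FileContentType.APPLICATION_PDF,
+     ).get_result()
+ print(result)
+ ```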
+ """ + + class FileContentType(str, Enum): + """ + The content type of file. + """ + + APPLICATION_JSON = 'application/json' + APPLICATION_MSWORD = 'application/msword' + APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' + APPLICATION_PDF = 'application/pdf' + TEXT_HTML = 'text/html' + APPLICATION_XHTML_XML = 'application/xhtml+xml' + + +class UpdateDocumentEnums: + """ + Enums for update_document parameters. + """ + + class FileContentType(str, Enum): + """ + The content type of file. + """ + + APPLICATION_JSON = 'application/json' + APPLICATION_MSWORD = 'application/msword' + APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' + APPLICATION_PDF = 'application/pdf' + TEXT_HTML = 'text/html' + APPLICATION_XHTML_XML = 'application/xhtml+xml' + + +class AnalyzeDocumentEnums: + """ + Enums for analyze_document parameters. + """ + + class FileContentType(str, Enum): + """ + The content type of file. + """ + + APPLICATION_JSON = 'application/json' + APPLICATION_MSWORD = 'application/msword' + APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' + APPLICATION_PDF = 'application/pdf' + TEXT_HTML = 'text/html' + APPLICATION_XHTML_XML = 'application/xhtml+xml' + + +############################################################################## +# Models +############################################################################## + + +class AnalyzedDocument: + """ + An object that contains the converted document and any identified enrichments. + Root-level fields from the original file are returned also. + + :param List[Notice] notices: (optional) Array of notices that are triggered when + the files are processed. + :param AnalyzedResult result: (optional) Result of the document analysis. + """ + + def __init__( + self, + *, + notices: Optional[List['Notice']] = None, + result: Optional['AnalyzedResult'] = None, + ) -> None: + """ + Initialize a AnalyzedDocument object. + + :param List[Notice] notices: (optional) Array of notices that are triggered + when the files are processed. + :param AnalyzedResult result: (optional) Result of the document analysis. 
+ """ + self.notices = notices + self.result = result + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AnalyzedDocument': + """Initialize a AnalyzedDocument object from a json dictionary.""" + args = {} + if (notices := _dict.get('notices')) is not None: + args['notices'] = [Notice.from_dict(v) for v in notices] + if (result := _dict.get('result')) is not None: + args['result'] = AnalyzedResult.from_dict(result) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AnalyzedDocument object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'notices') and self.notices is not None: + notices_list = [] + for v in self.notices: + if isinstance(v, dict): + notices_list.append(v) + else: + notices_list.append(v.to_dict()) + _dict['notices'] = notices_list + if hasattr(self, 'result') and self.result is not None: + if isinstance(self.result, dict): + _dict['result'] = self.result + else: + _dict['result'] = self.result.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AnalyzedDocument object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AnalyzedDocument') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AnalyzedDocument') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AnalyzedResult: + """ + Result of the document analysis. + + :param dict metadata: (optional) Metadata that was specified with the request. + + This type supports additional properties of type object. The remaining key-value + pairs. + """ + + # The set of defined properties for the class + _properties = frozenset(['metadata']) + + def __init__( + self, + *, + metadata: Optional[dict] = None, + **kwargs: Optional[object], + ) -> None: + """ + Initialize a AnalyzedResult object. + + :param dict metadata: (optional) Metadata that was specified with the + request. + :param object **kwargs: (optional) The remaining key-value pairs. + """ + self.metadata = metadata + for k, v in kwargs.items(): + if k not in AnalyzedResult._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. 
+ format(k)) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AnalyzedResult': + """Initialize a AnalyzedResult object from a json dictionary.""" + args = {} + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = metadata + for k, v in _dict.items(): + if k not in cls._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + args[k] = v + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AnalyzedResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'metadata') and self.metadata is not None: + _dict['metadata'] = self.metadata + for k in [ + _k for _k in vars(self).keys() + if _k not in AnalyzedResult._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def get_properties(self) -> Dict: + """Return the additional properties from this instance of AnalyzedResult in the form of a dict.""" + _dict = {} + for k in [ + _k for _k in vars(self).keys() + if _k not in AnalyzedResult._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of additional properties in this instance of AnalyzedResult""" + for k in [ + _k for _k in vars(self).keys() + if _k not in AnalyzedResult._properties + ]: + delattr(self, k) + for k, v in _dict.items(): + if k not in AnalyzedResult._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. + format(k)) + + def __str__(self) -> str: + """Return a `str` version of this AnalyzedResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AnalyzedResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AnalyzedResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class BatchDetails: + """ + A batch is a set of documents that are ready for enrichment by an external + application. After you apply a webhook enrichment to a collection, and then process or + upload documents to the collection, Discovery creates a batch with a unique + **batch_id**. + + :param str batch_id: (optional) The Universally Unique Identifier (UUID) for a + batch of documents. + :param datetime created: (optional) The date and time (RFC3339) that the batch + was created. + :param str enrichment_id: (optional) The Universally Unique Identifier (UUID) + for the external enrichment. + """ + + def __init__( + self, + *, + batch_id: Optional[str] = None, + created: Optional[datetime] = None, + enrichment_id: Optional[str] = None, + ) -> None: + """ + Initialize a BatchDetails object. + + :param str enrichment_id: (optional) The Universally Unique Identifier + (UUID) for the external enrichment. 
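+
+ Example (illustrative only; the dictionary shape follows the fields documented
+ above, and the identifiers are placeholders):
+ ```
+ batch = BatchDetails.from_dict({
+     'batch_id': '00000000-0000-0000-0000-000000000000',
+     'created': '2023-06-01T12:00:00Z',
+     'enrichment_id': '11111111-1111-1111-1111-111111111111',
+ })
+ print(batch.batch_id, batch.created, batch.enrichment_id)
+ ```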
+ """ + self.batch_id = batch_id + self.created = created + self.enrichment_id = enrichment_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'BatchDetails': + """Initialize a BatchDetails object from a json dictionary.""" + args = {} + if (batch_id := _dict.get('batch_id')) is not None: + args['batch_id'] = batch_id + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (enrichment_id := _dict.get('enrichment_id')) is not None: + args['enrichment_id'] = enrichment_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a BatchDetails object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'batch_id') and getattr(self, 'batch_id') is not None: + _dict['batch_id'] = getattr(self, 'batch_id') + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'enrichment_id') and self.enrichment_id is not None: + _dict['enrichment_id'] = self.enrichment_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this BatchDetails object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'BatchDetails') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'BatchDetails') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ClassifierFederatedModel: + """ + An object with details for creating federated document classifier models. + + :param str field: Name of the field that contains the values from which multiple + classifier models are defined. For example, you can specify a field that lists + product lines to create a separate model per product line. + """ + + def __init__( + self, + field: str, + ) -> None: + """ + Initialize a ClassifierFederatedModel object. + + :param str field: Name of the field that contains the values from which + multiple classifier models are defined. For example, you can specify a + field that lists product lines to create a separate model per product line. 
+ """ + self.field = field + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ClassifierFederatedModel': + """Initialize a ClassifierFederatedModel object from a json dictionary.""" + args = {} + if (field := _dict.get('field')) is not None: + args['field'] = field + else: + raise ValueError( + 'Required property \'field\' not present in ClassifierFederatedModel JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ClassifierFederatedModel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'field') and self.field is not None: + _dict['field'] = self.field + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ClassifierFederatedModel object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ClassifierFederatedModel') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ClassifierFederatedModel') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ClassifierModelEvaluation: + """ + An object that contains information about a trained document classifier model. + + :param ModelEvaluationMicroAverage micro_average: A micro-average aggregates the + contributions of all classes to compute the average metric. Classes refers to + the classification labels that are specified in the **answer_field**. + :param ModelEvaluationMacroAverage macro_average: A macro-average computes + metric independently for each class and then takes the average. Class refers to + the classification label that is specified in the **answer_field**. + :param List[PerClassModelEvaluation] per_class: An array of evaluation metrics, + one set of metrics for each class, where class refers to the classification + label that is specified in the **answer_field**. + """ + + def __init__( + self, + micro_average: 'ModelEvaluationMicroAverage', + macro_average: 'ModelEvaluationMacroAverage', + per_class: List['PerClassModelEvaluation'], + ) -> None: + """ + Initialize a ClassifierModelEvaluation object. + + :param ModelEvaluationMicroAverage micro_average: A micro-average + aggregates the contributions of all classes to compute the average metric. + Classes refers to the classification labels that are specified in the + **answer_field**. + :param ModelEvaluationMacroAverage macro_average: A macro-average computes + metric independently for each class and then takes the average. Class + refers to the classification label that is specified in the + **answer_field**. + :param List[PerClassModelEvaluation] per_class: An array of evaluation + metrics, one set of metrics for each class, where class refers to the + classification label that is specified in the **answer_field**. 
+ """ + self.micro_average = micro_average + self.macro_average = macro_average + self.per_class = per_class + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ClassifierModelEvaluation': + """Initialize a ClassifierModelEvaluation object from a json dictionary.""" + args = {} + if (micro_average := _dict.get('micro_average')) is not None: + args['micro_average'] = ModelEvaluationMicroAverage.from_dict( + micro_average) + else: + raise ValueError( + 'Required property \'micro_average\' not present in ClassifierModelEvaluation JSON' + ) + if (macro_average := _dict.get('macro_average')) is not None: + args['macro_average'] = ModelEvaluationMacroAverage.from_dict( + macro_average) + else: + raise ValueError( + 'Required property \'macro_average\' not present in ClassifierModelEvaluation JSON' + ) + if (per_class := _dict.get('per_class')) is not None: + args['per_class'] = [ + PerClassModelEvaluation.from_dict(v) for v in per_class + ] + else: + raise ValueError( + 'Required property \'per_class\' not present in ClassifierModelEvaluation JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ClassifierModelEvaluation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'micro_average') and self.micro_average is not None: + if isinstance(self.micro_average, dict): + _dict['micro_average'] = self.micro_average + else: + _dict['micro_average'] = self.micro_average.to_dict() + if hasattr(self, 'macro_average') and self.macro_average is not None: + if isinstance(self.macro_average, dict): + _dict['macro_average'] = self.macro_average + else: + _dict['macro_average'] = self.macro_average.to_dict() + if hasattr(self, 'per_class') and self.per_class is not None: + per_class_list = [] + for v in self.per_class: + if isinstance(v, dict): + per_class_list.append(v) + else: + per_class_list.append(v.to_dict()) + _dict['per_class'] = per_class_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ClassifierModelEvaluation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ClassifierModelEvaluation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ClassifierModelEvaluation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Collection: + """ + A collection for storing documents. + + :param str collection_id: (optional) The Universally Unique Identifier (UUID) of + the collection. + :param str name: (optional) The name of the collection. + """ + + def __init__( + self, + *, + collection_id: Optional[str] = None, + name: Optional[str] = None, + ) -> None: + """ + Initialize a Collection object. + + :param str name: (optional) The name of the collection. 
+ """ + self.collection_id = collection_id + self.name = name + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Collection': + """Initialize a Collection object from a json dictionary.""" + args = {} + if (collection_id := _dict.get('collection_id')) is not None: + args['collection_id'] = collection_id + if (name := _dict.get('name')) is not None: + args['name'] = name + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Collection object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'collection_id') and getattr( + self, 'collection_id') is not None: + _dict['collection_id'] = getattr(self, 'collection_id') + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Collection object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Collection') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Collection') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CollectionDetails: + """ + A collection for storing documents. + + :param str collection_id: (optional) The Universally Unique Identifier (UUID) of + the collection. + :param str name: The name of the collection. + :param str description: (optional) A description of the collection. + :param datetime created: (optional) The date that the collection was created. + :param str language: (optional) The language of the collection. For a list of + supported languages, see the [product + documentation](/docs/discovery-data?topic=discovery-data-language-support). + :param bool ocr_enabled: (optional) If set to `true`, optical character + recognition (OCR) is enabled. For more information, see [Optical character + recognition](/docs/discovery-data?topic=discovery-data-collections#ocr). + :param List[CollectionEnrichment] enrichments: (optional) An array of + enrichments that are applied to this collection. To get a list of enrichments + that are available for a project, use the [List enrichments](#listenrichments) + method. + If no enrichments are specified when the collection is created, the default + enrichments for the project type are applied. For more information about project + default settings, see the [product + documentation](/docs/discovery-data?topic=discovery-data-project-defaults). + :param CollectionDetailsSmartDocumentUnderstanding smart_document_understanding: + (optional) An object that describes the Smart Document Understanding model for a + collection. + """ + + def __init__( + self, + name: str, + *, + collection_id: Optional[str] = None, + description: Optional[str] = None, + created: Optional[datetime] = None, + language: Optional[str] = None, + ocr_enabled: Optional[bool] = None, + enrichments: Optional[List['CollectionEnrichment']] = None, + smart_document_understanding: Optional[ + 'CollectionDetailsSmartDocumentUnderstanding'] = None, + ) -> None: + """ + Initialize a CollectionDetails object. + + :param str name: The name of the collection. 
+ :param str description: (optional) A description of the collection. + :param str language: (optional) The language of the collection. For a list + of supported languages, see the [product + documentation](/docs/discovery-data?topic=discovery-data-language-support). + :param bool ocr_enabled: (optional) If set to `true`, optical character + recognition (OCR) is enabled. For more information, see [Optical character + recognition](/docs/discovery-data?topic=discovery-data-collections#ocr). + :param List[CollectionEnrichment] enrichments: (optional) An array of + enrichments that are applied to this collection. To get a list of + enrichments that are available for a project, use the [List + enrichments](#listenrichments) method. + If no enrichments are specified when the collection is created, the default + enrichments for the project type are applied. For more information about + project default settings, see the [product + documentation](/docs/discovery-data?topic=discovery-data-project-defaults). + """ + self.collection_id = collection_id + self.name = name + self.description = description + self.created = created + self.language = language + self.ocr_enabled = ocr_enabled + self.enrichments = enrichments + self.smart_document_understanding = smart_document_understanding + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CollectionDetails': + """Initialize a CollectionDetails object from a json dictionary.""" + args = {} + if (collection_id := _dict.get('collection_id')) is not None: + args['collection_id'] = collection_id + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in CollectionDetails JSON' + ) + if (description := _dict.get('description')) is not None: + args['description'] = description + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (language := _dict.get('language')) is not None: + args['language'] = language + if (ocr_enabled := _dict.get('ocr_enabled')) is not None: + args['ocr_enabled'] = ocr_enabled + if (enrichments := _dict.get('enrichments')) is not None: + args['enrichments'] = [ + CollectionEnrichment.from_dict(v) for v in enrichments + ] + if (smart_document_understanding := + _dict.get('smart_document_understanding')) is not None: + args[ + 'smart_document_understanding'] = CollectionDetailsSmartDocumentUnderstanding.from_dict( + smart_document_understanding) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CollectionDetails object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'collection_id') and getattr( + self, 'collection_id') is not None: + _dict['collection_id'] = getattr(self, 'collection_id') + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'ocr_enabled') and self.ocr_enabled is not None: + _dict['ocr_enabled'] = self.ocr_enabled + if hasattr(self, 'enrichments') and self.enrichments is not None: + enrichments_list = [] + for v in self.enrichments: + if 
isinstance(v, dict): + enrichments_list.append(v) + else: + enrichments_list.append(v.to_dict()) + _dict['enrichments'] = enrichments_list + if hasattr(self, 'smart_document_understanding') and getattr( + self, 'smart_document_understanding') is not None: + if isinstance(getattr(self, 'smart_document_understanding'), dict): + _dict['smart_document_understanding'] = getattr( + self, 'smart_document_understanding') + else: + _dict['smart_document_understanding'] = getattr( + self, 'smart_document_understanding').to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CollectionDetails object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CollectionDetails') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CollectionDetails') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CollectionDetailsSmartDocumentUnderstanding: + """ + An object that describes the Smart Document Understanding model for a collection. + + :param bool enabled: (optional) When `true`, smart document understanding + conversion is enabled for the collection. + :param str model: (optional) Specifies the type of Smart Document Understanding + (SDU) model that is enabled for the collection. The following types of models + are supported: + * `custom`: A user-trained model is applied. + * `pre_trained`: A pretrained model is applied. This type of model is applied + automatically to *Document Retrieval for Contracts* projects. + * `text_extraction`: An SDU model that extracts text and metadata from the + content. This model is enabled in collections by default regardless of the types + of documents in the collection (as long as the service plan supports SDU + models). + You can apply user-trained or pretrained models to collections from the + *Identify fields* page of the product user interface. For more information, see + [the product + documentation](/docs/discovery-data?topic=discovery-data-configuring-fields). + """ + + def __init__( + self, + *, + enabled: Optional[bool] = None, + model: Optional[str] = None, + ) -> None: + """ + Initialize a CollectionDetailsSmartDocumentUnderstanding object. + + :param bool enabled: (optional) When `true`, smart document understanding + conversion is enabled for the collection. + :param str model: (optional) Specifies the type of Smart Document + Understanding (SDU) model that is enabled for the collection. The following + types of models are supported: + * `custom`: A user-trained model is applied. + * `pre_trained`: A pretrained model is applied. This type of model is + applied automatically to *Document Retrieval for Contracts* projects. + * `text_extraction`: An SDU model that extracts text and metadata from the + content. This model is enabled in collections by default regardless of the + types of documents in the collection (as long as the service plan supports + SDU models). + You can apply user-trained or pretrained models to collections from the + *Identify fields* page of the product user interface. For more information, + see [the product + documentation](/docs/discovery-data?topic=discovery-data-configuring-fields). 
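+
+ Example (a sketch; assumes an authenticated `DiscoveryV2` client named
+ `discovery`, placeholder IDs, and the `get_collection` operation defined earlier
+ in this service class):
+ ```
+ details = discovery.get_collection(
+     project_id='{project_id}',
+     collection_id='{collection_id}',
+ ).get_result()
+ collection = CollectionDetails.from_dict(details)
+ sdu = collection.smart_document_understanding
+ if sdu is not None and sdu.enabled:
+     print('SDU model type:', sdu.model)
+ ```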
+ """ + self.enabled = enabled + self.model = model + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'CollectionDetailsSmartDocumentUnderstanding': + """Initialize a CollectionDetailsSmartDocumentUnderstanding object from a json dictionary.""" + args = {} + if (enabled := _dict.get('enabled')) is not None: + args['enabled'] = enabled + if (model := _dict.get('model')) is not None: + args['model'] = model + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CollectionDetailsSmartDocumentUnderstanding object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enabled') and self.enabled is not None: + _dict['enabled'] = self.enabled + if hasattr(self, 'model') and self.model is not None: + _dict['model'] = self.model + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CollectionDetailsSmartDocumentUnderstanding object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'CollectionDetailsSmartDocumentUnderstanding') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'CollectionDetailsSmartDocumentUnderstanding') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class ModelEnum(str, Enum): + """ + Specifies the type of Smart Document Understanding (SDU) model that is enabled for + the collection. The following types of models are supported: + * `custom`: A user-trained model is applied. + * `pre_trained`: A pretrained model is applied. This type of model is applied + automatically to *Document Retrieval for Contracts* projects. + * `text_extraction`: An SDU model that extracts text and metadata from the + content. This model is enabled in collections by default regardless of the types + of documents in the collection (as long as the service plan supports SDU models). + You can apply user-trained or pretrained models to collections from the *Identify + fields* page of the product user interface. For more information, see [the product + documentation](/docs/discovery-data?topic=discovery-data-configuring-fields). + """ + + CUSTOM = 'custom' + PRE_TRAINED = 'pre_trained' + TEXT_EXTRACTION = 'text_extraction' + + +class CollectionEnrichment: + """ + An object describing an enrichment for a collection. + + :param str enrichment_id: (optional) The unique identifier of this enrichment. + For more information about how to determine the ID of an enrichment, see [the + product + documentation](/docs/discovery-data?topic=discovery-data-manage-enrichments#enrichments-ids). + :param List[str] fields: (optional) An array of field names that the enrichment + is applied to. + If you apply an enrichment to a field from a JSON file, the data is converted to + an array automatically, even if the field contains a single value. + """ + + def __init__( + self, + *, + enrichment_id: Optional[str] = None, + fields: Optional[List[str]] = None, + ) -> None: + """ + Initialize a CollectionEnrichment object. + + :param str enrichment_id: (optional) The unique identifier of this + enrichment. 
For more information about how to determine the ID of an + enrichment, see [the product + documentation](/docs/discovery-data?topic=discovery-data-manage-enrichments#enrichments-ids). + :param List[str] fields: (optional) An array of field names that the + enrichment is applied to. + If you apply an enrichment to a field from a JSON file, the data is + converted to an array automatically, even if the field contains a single + value. + """ + self.enrichment_id = enrichment_id + self.fields = fields + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CollectionEnrichment': + """Initialize a CollectionEnrichment object from a json dictionary.""" + args = {} + if (enrichment_id := _dict.get('enrichment_id')) is not None: + args['enrichment_id'] = enrichment_id + if (fields := _dict.get('fields')) is not None: + args['fields'] = fields + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CollectionEnrichment object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enrichment_id') and self.enrichment_id is not None: + _dict['enrichment_id'] = self.enrichment_id + if hasattr(self, 'fields') and self.fields is not None: + _dict['fields'] = self.fields + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CollectionEnrichment object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CollectionEnrichment') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CollectionEnrichment') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Completions: + """ + An object that contains an array of autocompletion suggestions. + + :param List[str] completions: (optional) Array of autocomplete suggestion based + on the provided prefix. + """ + + def __init__( + self, + *, + completions: Optional[List[str]] = None, + ) -> None: + """ + Initialize a Completions object. + + :param List[str] completions: (optional) Array of autocomplete suggestion + based on the provided prefix. 
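+
+ Example (illustrative; assumes an authenticated `DiscoveryV2` client named
+ `discovery` and the `get_autocompletion` operation defined earlier in this
+ service class):
+ ```
+ response = discovery.get_autocompletion(
+     project_id='{project_id}',
+     prefix='hea',
+ ).get_result()
+ suggestions = Completions.from_dict(response).completions or []
+ print(suggestions)
+ ```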
+ """ + self.completions = completions + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Completions': + """Initialize a Completions object from a json dictionary.""" + args = {} + if (completions := _dict.get('completions')) is not None: + args['completions'] = completions + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Completions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'completions') and self.completions is not None: + _dict['completions'] = self.completions + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Completions object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Completions') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Completions') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ComponentSettingsAggregation: + """ + Display settings for aggregations. + + :param str name: (optional) Identifier used to map aggregation settings to + aggregation configuration. + :param str label: (optional) User-friendly alias for the aggregation. + :param bool multiple_selections_allowed: (optional) Whether users is allowed to + select more than one of the aggregation terms. + :param str visualization_type: (optional) Type of visualization to use when + rendering the aggregation. + """ + + def __init__( + self, + *, + name: Optional[str] = None, + label: Optional[str] = None, + multiple_selections_allowed: Optional[bool] = None, + visualization_type: Optional[str] = None, + ) -> None: + """ + Initialize a ComponentSettingsAggregation object. + + :param str name: (optional) Identifier used to map aggregation settings to + aggregation configuration. + :param str label: (optional) User-friendly alias for the aggregation. + :param bool multiple_selections_allowed: (optional) Whether users is + allowed to select more than one of the aggregation terms. + :param str visualization_type: (optional) Type of visualization to use when + rendering the aggregation. 
+ """ + self.name = name + self.label = label + self.multiple_selections_allowed = multiple_selections_allowed + self.visualization_type = visualization_type + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ComponentSettingsAggregation': + """Initialize a ComponentSettingsAggregation object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + if (label := _dict.get('label')) is not None: + args['label'] = label + if (multiple_selections_allowed := + _dict.get('multiple_selections_allowed')) is not None: + args['multiple_selections_allowed'] = multiple_selections_allowed + if (visualization_type := _dict.get('visualization_type')) is not None: + args['visualization_type'] = visualization_type + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ComponentSettingsAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'label') and self.label is not None: + _dict['label'] = self.label + if hasattr(self, 'multiple_selections_allowed' + ) and self.multiple_selections_allowed is not None: + _dict[ + 'multiple_selections_allowed'] = self.multiple_selections_allowed + if hasattr( + self, + 'visualization_type') and self.visualization_type is not None: + _dict['visualization_type'] = self.visualization_type + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ComponentSettingsAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ComponentSettingsAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ComponentSettingsAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class VisualizationTypeEnum(str, Enum): + """ + Type of visualization to use when rendering the aggregation. + """ + + AUTO = 'auto' + FACET_TABLE = 'facet_table' + WORD_CLOUD = 'word_cloud' + MAP = 'map' + + +class ComponentSettingsFieldsShown: + """ + Fields shown in the results section of the UI. + + :param ComponentSettingsFieldsShownBody body: (optional) Body label. + :param ComponentSettingsFieldsShownTitle title: (optional) Title label. + """ + + def __init__( + self, + *, + body: Optional['ComponentSettingsFieldsShownBody'] = None, + title: Optional['ComponentSettingsFieldsShownTitle'] = None, + ) -> None: + """ + Initialize a ComponentSettingsFieldsShown object. + + :param ComponentSettingsFieldsShownBody body: (optional) Body label. + :param ComponentSettingsFieldsShownTitle title: (optional) Title label. 
+ """ + self.body = body + self.title = title + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ComponentSettingsFieldsShown': + """Initialize a ComponentSettingsFieldsShown object from a json dictionary.""" + args = {} + if (body := _dict.get('body')) is not None: + args['body'] = ComponentSettingsFieldsShownBody.from_dict(body) + if (title := _dict.get('title')) is not None: + args['title'] = ComponentSettingsFieldsShownTitle.from_dict(title) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ComponentSettingsFieldsShown object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'body') and self.body is not None: + if isinstance(self.body, dict): + _dict['body'] = self.body + else: + _dict['body'] = self.body.to_dict() + if hasattr(self, 'title') and self.title is not None: + if isinstance(self.title, dict): + _dict['title'] = self.title + else: + _dict['title'] = self.title.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ComponentSettingsFieldsShown object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ComponentSettingsFieldsShown') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ComponentSettingsFieldsShown') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ComponentSettingsFieldsShownBody: + """ + Body label. + + :param bool use_passage: (optional) Use the whole passage as the body. + :param str field: (optional) Use a specific field as the title. + """ + + def __init__( + self, + *, + use_passage: Optional[bool] = None, + field: Optional[str] = None, + ) -> None: + """ + Initialize a ComponentSettingsFieldsShownBody object. + + :param bool use_passage: (optional) Use the whole passage as the body. + :param str field: (optional) Use a specific field as the title. 
+ """ + self.use_passage = use_passage + self.field = field + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ComponentSettingsFieldsShownBody': + """Initialize a ComponentSettingsFieldsShownBody object from a json dictionary.""" + args = {} + if (use_passage := _dict.get('use_passage')) is not None: + args['use_passage'] = use_passage + if (field := _dict.get('field')) is not None: + args['field'] = field + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ComponentSettingsFieldsShownBody object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'use_passage') and self.use_passage is not None: + _dict['use_passage'] = self.use_passage + if hasattr(self, 'field') and self.field is not None: + _dict['field'] = self.field + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ComponentSettingsFieldsShownBody object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ComponentSettingsFieldsShownBody') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ComponentSettingsFieldsShownBody') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ComponentSettingsFieldsShownTitle: + """ + Title label. + + :param str field: (optional) Use a specific field as the title. + """ + + def __init__( + self, + *, + field: Optional[str] = None, + ) -> None: + """ + Initialize a ComponentSettingsFieldsShownTitle object. + + :param str field: (optional) Use a specific field as the title. + """ + self.field = field + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ComponentSettingsFieldsShownTitle': + """Initialize a ComponentSettingsFieldsShownTitle object from a json dictionary.""" + args = {} + if (field := _dict.get('field')) is not None: + args['field'] = field + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ComponentSettingsFieldsShownTitle object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'field') and self.field is not None: + _dict['field'] = self.field + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ComponentSettingsFieldsShownTitle object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ComponentSettingsFieldsShownTitle') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ComponentSettingsFieldsShownTitle') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ComponentSettingsResponse: + """ + The default component settings for this project. + + :param ComponentSettingsFieldsShown fields_shown: (optional) Fields shown in the + results section of the UI. 
+    :param bool autocomplete: (optional) Whether or not autocomplete is enabled.
+    :param bool structured_search: (optional) Whether or not structured search is
+    enabled.
+    :param int results_per_page: (optional) Number of results shown per page.
+    :param List[ComponentSettingsAggregation] aggregations: (optional) A list of
+    component setting aggregations.
+    """
+
+    def __init__(
+        self,
+        *,
+        fields_shown: Optional['ComponentSettingsFieldsShown'] = None,
+        autocomplete: Optional[bool] = None,
+        structured_search: Optional[bool] = None,
+        results_per_page: Optional[int] = None,
+        aggregations: Optional[List['ComponentSettingsAggregation']] = None,
+    ) -> None:
+        """
+        Initialize a ComponentSettingsResponse object.
+
+        :param ComponentSettingsFieldsShown fields_shown: (optional) Fields shown
+         in the results section of the UI.
+        :param bool autocomplete: (optional) Whether or not autocomplete is
+         enabled.
+        :param bool structured_search: (optional) Whether or not structured search
+         is enabled.
+        :param int results_per_page: (optional) Number of results shown per page.
+        :param List[ComponentSettingsAggregation] aggregations: (optional) A list
+         of component setting aggregations.
+        """
+        self.fields_shown = fields_shown
+        self.autocomplete = autocomplete
+        self.structured_search = structured_search
+        self.results_per_page = results_per_page
+        self.aggregations = aggregations
+
+    @classmethod
+    def from_dict(cls, _dict: Dict) -> 'ComponentSettingsResponse':
+        """Initialize a ComponentSettingsResponse object from a json dictionary."""
+        args = {}
+        if (fields_shown := _dict.get('fields_shown')) is not None:
+            args['fields_shown'] = ComponentSettingsFieldsShown.from_dict(
+                fields_shown)
+        if (autocomplete := _dict.get('autocomplete')) is not None:
+            args['autocomplete'] = autocomplete
+        if (structured_search := _dict.get('structured_search')) is not None:
+            args['structured_search'] = structured_search
+        if (results_per_page := _dict.get('results_per_page')) is not None:
+            args['results_per_page'] = results_per_page
+        if (aggregations := _dict.get('aggregations')) is not None:
+            args['aggregations'] = [
+                ComponentSettingsAggregation.from_dict(v) for v in aggregations
+            ]
+        return cls(**args)
+
+    @classmethod
+    def _from_dict(cls, _dict):
+        """Initialize a ComponentSettingsResponse object from a json dictionary."""
+        return cls.from_dict(_dict)
+
+    def to_dict(self) -> Dict:
+        """Return a json dictionary representing this model."""
+        _dict = {}
+        if hasattr(self, 'fields_shown') and self.fields_shown is not None:
+            if isinstance(self.fields_shown, dict):
+                _dict['fields_shown'] = self.fields_shown
+            else:
+                _dict['fields_shown'] = self.fields_shown.to_dict()
+        if hasattr(self, 'autocomplete') and self.autocomplete is not None:
+            _dict['autocomplete'] = self.autocomplete
+        if hasattr(self,
+                   'structured_search') and self.structured_search is not None:
+            _dict['structured_search'] = self.structured_search
+        if hasattr(self,
+                   'results_per_page') and self.results_per_page is not None:
+            _dict['results_per_page'] = self.results_per_page
+        if hasattr(self, 'aggregations') and self.aggregations is not None:
+            aggregations_list = []
+            for v in self.aggregations:
+                if isinstance(v, dict):
+                    aggregations_list.append(v)
+                else:
+                    aggregations_list.append(v.to_dict())
+            _dict['aggregations'] = aggregations_list
+        return _dict
+
+    def _to_dict(self):
+        """Return a json dictionary representing this model."""
+        return self.to_dict()
+
+    def __str__(self) -> str:
+        """Return a `str` version of this 
ComponentSettingsResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ComponentSettingsResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ComponentSettingsResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CreateDocumentClassifier: + """ + An object that manages the settings and data that is required to train a document + classification model. + + :param str name: A human-readable name of the document classifier. + :param str description: (optional) A description of the document classifier. + :param str language: The language of the training data that is associated with + the document classifier. Language is specified by using the ISO 639-1 language + code, such as `en` for English or `ja` for Japanese. + :param str answer_field: The name of the field from the training and test data + that contains the classification labels. + :param List[DocumentClassifierEnrichment] enrichments: (optional) An array of + enrichments to apply to the data that is used to train and test the document + classifier. The output from the enrichments is used as features by the + classifier to classify the document content both during training and at run + time. + :param ClassifierFederatedModel federated_classification: (optional) An object + with details for creating federated document classifier models. + """ + + def __init__( + self, + name: str, + language: str, + answer_field: str, + *, + description: Optional[str] = None, + enrichments: Optional[List['DocumentClassifierEnrichment']] = None, + federated_classification: Optional['ClassifierFederatedModel'] = None, + ) -> None: + """ + Initialize a CreateDocumentClassifier object. + + :param str name: A human-readable name of the document classifier. + :param str language: The language of the training data that is associated + with the document classifier. Language is specified by using the ISO 639-1 + language code, such as `en` for English or `ja` for Japanese. + :param str answer_field: The name of the field from the training and test + data that contains the classification labels. + :param str description: (optional) A description of the document + classifier. + :param List[DocumentClassifierEnrichment] enrichments: (optional) An array + of enrichments to apply to the data that is used to train and test the + document classifier. The output from the enrichments is used as features by + the classifier to classify the document content both during training and at + run time. + :param ClassifierFederatedModel federated_classification: (optional) An + object with details for creating federated document classifier models. 
+ """ + self.name = name + self.description = description + self.language = language + self.answer_field = answer_field + self.enrichments = enrichments + self.federated_classification = federated_classification + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CreateDocumentClassifier': + """Initialize a CreateDocumentClassifier object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in CreateDocumentClassifier JSON' + ) + if (description := _dict.get('description')) is not None: + args['description'] = description + if (language := _dict.get('language')) is not None: + args['language'] = language + else: + raise ValueError( + 'Required property \'language\' not present in CreateDocumentClassifier JSON' + ) + if (answer_field := _dict.get('answer_field')) is not None: + args['answer_field'] = answer_field + else: + raise ValueError( + 'Required property \'answer_field\' not present in CreateDocumentClassifier JSON' + ) + if (enrichments := _dict.get('enrichments')) is not None: + args['enrichments'] = [ + DocumentClassifierEnrichment.from_dict(v) for v in enrichments + ] + if (federated_classification := + _dict.get('federated_classification')) is not None: + args[ + 'federated_classification'] = ClassifierFederatedModel.from_dict( + federated_classification) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CreateDocumentClassifier object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'answer_field') and self.answer_field is not None: + _dict['answer_field'] = self.answer_field + if hasattr(self, 'enrichments') and self.enrichments is not None: + enrichments_list = [] + for v in self.enrichments: + if isinstance(v, dict): + enrichments_list.append(v) + else: + enrichments_list.append(v.to_dict()) + _dict['enrichments'] = enrichments_list + if hasattr(self, 'federated_classification' + ) and self.federated_classification is not None: + if isinstance(self.federated_classification, dict): + _dict[ + 'federated_classification'] = self.federated_classification + else: + _dict[ + 'federated_classification'] = self.federated_classification.to_dict( + ) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CreateDocumentClassifier object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CreateDocumentClassifier') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CreateDocumentClassifier') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CreateEnrichment: + """ + Information about a specific enrichment. + + :param str name: (optional) The human readable name for this enrichment. 
+ :param str description: (optional) The description of this enrichment. + :param str type: (optional) The type of this enrichment. The following types are + supported: + * `classifier`: Creates a document classifier enrichment from a document + classifier model that you create by using the [Document classifier + API](/apidocs/discovery-data#createdocumentclassifier). **Note**: A text + classifier enrichment can be created only from the product user interface. + * `dictionary`: Creates a custom dictionary enrichment that you define in a CSV + file. + * `regular_expression`: Creates a custom regular expression enrichment from + regex syntax that you specify in the request. + * `rule_based`: Creates an enrichment from an advanced rules model that is + created and exported as a ZIP file from Watson Knowledge Studio. + * `uima_annotator`: Creates an enrichment from a custom UIMA text analysis model + that is defined in a PEAR file created in one of the following ways: + * Watson Explorer Content Analytics Studio. **Note**: Supported in IBM Cloud + Pak for Data instances only. + * Rule-based model that is created in Watson Knowledge Studio. + * `watson_knowledge_studio_model`: Creates an enrichment from a Watson Knowledge + Studio machine learning model that is defined in a ZIP file. + * `webhook`: Connects to an external enrichment application by using a webhook. + * `sentence_classifier`: Use sentence classifier to classify sentences in your + documents. This feature is available in IBM Cloud-managed instances only. The + sentence classifier feature is beta functionality. Beta features are not + supported by the SDKs. + :param EnrichmentOptions options: (optional) An object that contains options for + the current enrichment. Starting with version `2020-08-30`, the enrichment + options are not included in responses from the List Enrichments method. + """ + + def __init__( + self, + *, + name: Optional[str] = None, + description: Optional[str] = None, + type: Optional[str] = None, + options: Optional['EnrichmentOptions'] = None, + ) -> None: + """ + Initialize a CreateEnrichment object. + + :param str name: (optional) The human readable name for this enrichment. + :param str description: (optional) The description of this enrichment. + :param str type: (optional) The type of this enrichment. The following + types are supported: + * `classifier`: Creates a document classifier enrichment from a document + classifier model that you create by using the [Document classifier + API](/apidocs/discovery-data#createdocumentclassifier). **Note**: A text + classifier enrichment can be created only from the product user interface. + * `dictionary`: Creates a custom dictionary enrichment that you define in a + CSV file. + * `regular_expression`: Creates a custom regular expression enrichment from + regex syntax that you specify in the request. + * `rule_based`: Creates an enrichment from an advanced rules model that is + created and exported as a ZIP file from Watson Knowledge Studio. + * `uima_annotator`: Creates an enrichment from a custom UIMA text analysis + model that is defined in a PEAR file created in one of the following ways: + * Watson Explorer Content Analytics Studio. **Note**: Supported in IBM + Cloud Pak for Data instances only. + * Rule-based model that is created in Watson Knowledge Studio. + * `watson_knowledge_studio_model`: Creates an enrichment from a Watson + Knowledge Studio machine learning model that is defined in a ZIP file. 
+ * `webhook`: Connects to an external enrichment application by using a + webhook. + * `sentence_classifier`: Use sentence classifier to classify sentences in + your documents. This feature is available in IBM Cloud-managed instances + only. The sentence classifier feature is beta functionality. Beta features + are not supported by the SDKs. + :param EnrichmentOptions options: (optional) An object that contains + options for the current enrichment. Starting with version `2020-08-30`, the + enrichment options are not included in responses from the List Enrichments + method. + """ + self.name = name + self.description = description + self.type = type + self.options = options + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CreateEnrichment': + """Initialize a CreateEnrichment object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + if (description := _dict.get('description')) is not None: + args['description'] = description + if (type := _dict.get('type')) is not None: + args['type'] = type + if (options := _dict.get('options')) is not None: + args['options'] = EnrichmentOptions.from_dict(options) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CreateEnrichment object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'options') and self.options is not None: + if isinstance(self.options, dict): + _dict['options'] = self.options + else: + _dict['options'] = self.options.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CreateEnrichment object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CreateEnrichment') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CreateEnrichment') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of this enrichment. The following types are supported: + * `classifier`: Creates a document classifier enrichment from a document + classifier model that you create by using the [Document classifier + API](/apidocs/discovery-data#createdocumentclassifier). **Note**: A text + classifier enrichment can be created only from the product user interface. + * `dictionary`: Creates a custom dictionary enrichment that you define in a CSV + file. + * `regular_expression`: Creates a custom regular expression enrichment from regex + syntax that you specify in the request. + * `rule_based`: Creates an enrichment from an advanced rules model that is created + and exported as a ZIP file from Watson Knowledge Studio. + * `uima_annotator`: Creates an enrichment from a custom UIMA text analysis model + that is defined in a PEAR file created in one of the following ways: + * Watson Explorer Content Analytics Studio. 
**Note**: Supported in IBM Cloud + Pak for Data instances only. + * Rule-based model that is created in Watson Knowledge Studio. + * `watson_knowledge_studio_model`: Creates an enrichment from a Watson Knowledge + Studio machine learning model that is defined in a ZIP file. + * `webhook`: Connects to an external enrichment application by using a webhook. + * `sentence_classifier`: Use sentence classifier to classify sentences in your + documents. This feature is available in IBM Cloud-managed instances only. The + sentence classifier feature is beta functionality. Beta features are not supported + by the SDKs. + """ + + CLASSIFIER = 'classifier' + DICTIONARY = 'dictionary' + REGULAR_EXPRESSION = 'regular_expression' + UIMA_ANNOTATOR = 'uima_annotator' + RULE_BASED = 'rule_based' + WATSON_KNOWLEDGE_STUDIO_MODEL = 'watson_knowledge_studio_model' + WEBHOOK = 'webhook' + SENTENCE_CLASSIFIER = 'sentence_classifier' + + +class DefaultQueryParams: + """ + Default query parameters for this project. + + :param List[str] collection_ids: (optional) An array of collection identifiers + to query. If empty or omitted all collections in the project are queried. + :param DefaultQueryParamsPassages passages: (optional) Default settings + configuration for passage search options. + :param DefaultQueryParamsTableResults table_results: (optional) Default project + query settings for table results. + :param str aggregation: (optional) A string representing the default aggregation + query for the project. + :param DefaultQueryParamsSuggestedRefinements suggested_refinements: (optional) + Object that contains suggested refinement settings. + **Note**: The `suggested_refinements` parameter that identified dynamic facets + from the data is deprecated. + :param bool spelling_suggestions: (optional) When `true`, a spelling suggestions + for the query are returned by default. + :param bool highlight: (optional) When `true`, highlights for the query are + returned by default. + :param int count: (optional) The number of document results returned by default. + :param str sort: (optional) A comma separated list of document fields to sort + results by default. + :param List[str] return_: (optional) An array of field names to return in + document results if present by default. + """ + + def __init__( + self, + *, + collection_ids: Optional[List[str]] = None, + passages: Optional['DefaultQueryParamsPassages'] = None, + table_results: Optional['DefaultQueryParamsTableResults'] = None, + aggregation: Optional[str] = None, + suggested_refinements: Optional[ + 'DefaultQueryParamsSuggestedRefinements'] = None, + spelling_suggestions: Optional[bool] = None, + highlight: Optional[bool] = None, + count: Optional[int] = None, + sort: Optional[str] = None, + return_: Optional[List[str]] = None, + ) -> None: + """ + Initialize a DefaultQueryParams object. + + :param List[str] collection_ids: (optional) An array of collection + identifiers to query. If empty or omitted all collections in the project + are queried. + :param DefaultQueryParamsPassages passages: (optional) Default settings + configuration for passage search options. + :param DefaultQueryParamsTableResults table_results: (optional) Default + project query settings for table results. + :param str aggregation: (optional) A string representing the default + aggregation query for the project. + :param DefaultQueryParamsSuggestedRefinements suggested_refinements: + (optional) Object that contains suggested refinement settings. 
+ **Note**: The `suggested_refinements` parameter that identified dynamic + facets from the data is deprecated. + :param bool spelling_suggestions: (optional) When `true`, a spelling + suggestions for the query are returned by default. + :param bool highlight: (optional) When `true`, highlights for the query are + returned by default. + :param int count: (optional) The number of document results returned by + default. + :param str sort: (optional) A comma separated list of document fields to + sort results by default. + :param List[str] return_: (optional) An array of field names to return in + document results if present by default. + """ + self.collection_ids = collection_ids + self.passages = passages + self.table_results = table_results + self.aggregation = aggregation + self.suggested_refinements = suggested_refinements + self.spelling_suggestions = spelling_suggestions + self.highlight = highlight + self.count = count + self.sort = sort + self.return_ = return_ + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DefaultQueryParams': + """Initialize a DefaultQueryParams object from a json dictionary.""" + args = {} + if (collection_ids := _dict.get('collection_ids')) is not None: + args['collection_ids'] = collection_ids + if (passages := _dict.get('passages')) is not None: + args['passages'] = DefaultQueryParamsPassages.from_dict(passages) + if (table_results := _dict.get('table_results')) is not None: + args['table_results'] = DefaultQueryParamsTableResults.from_dict( + table_results) + if (aggregation := _dict.get('aggregation')) is not None: + args['aggregation'] = aggregation + if (suggested_refinements := + _dict.get('suggested_refinements')) is not None: + args[ + 'suggested_refinements'] = DefaultQueryParamsSuggestedRefinements.from_dict( + suggested_refinements) + if (spelling_suggestions := + _dict.get('spelling_suggestions')) is not None: + args['spelling_suggestions'] = spelling_suggestions + if (highlight := _dict.get('highlight')) is not None: + args['highlight'] = highlight + if (count := _dict.get('count')) is not None: + args['count'] = count + if (sort := _dict.get('sort')) is not None: + args['sort'] = sort + if (return_ := _dict.get('return')) is not None: + args['return_'] = return_ + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DefaultQueryParams object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'collection_ids') and self.collection_ids is not None: + _dict['collection_ids'] = self.collection_ids + if hasattr(self, 'passages') and self.passages is not None: + if isinstance(self.passages, dict): + _dict['passages'] = self.passages + else: + _dict['passages'] = self.passages.to_dict() + if hasattr(self, 'table_results') and self.table_results is not None: + if isinstance(self.table_results, dict): + _dict['table_results'] = self.table_results + else: + _dict['table_results'] = self.table_results.to_dict() + if hasattr(self, 'aggregation') and self.aggregation is not None: + _dict['aggregation'] = self.aggregation + if hasattr(self, 'suggested_refinements' + ) and self.suggested_refinements is not None: + if isinstance(self.suggested_refinements, dict): + _dict['suggested_refinements'] = self.suggested_refinements + else: + _dict[ + 'suggested_refinements'] = self.suggested_refinements.to_dict( + ) + if hasattr(self, 'spelling_suggestions' + ) and self.spelling_suggestions is not None: + 
_dict['spelling_suggestions'] = self.spelling_suggestions + if hasattr(self, 'highlight') and self.highlight is not None: + _dict['highlight'] = self.highlight + if hasattr(self, 'count') and self.count is not None: + _dict['count'] = self.count + if hasattr(self, 'sort') and self.sort is not None: + _dict['sort'] = self.sort + if hasattr(self, 'return_') and self.return_ is not None: + _dict['return'] = self.return_ + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DefaultQueryParams object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DefaultQueryParams') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DefaultQueryParams') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DefaultQueryParamsPassages: + """ + Default settings configuration for passage search options. + + :param bool enabled: (optional) When `true`, a passage search is performed by + default. + :param int count: (optional) The number of passages to return. + :param List[str] fields: (optional) An array of field names to perform the + passage search on. + :param int characters: (optional) The approximate number of characters that each + returned passage will contain. + :param bool per_document: (optional) When `true` the number of passages that can + be returned from a single document is restricted to the *max_per_document* + value. + :param int max_per_document: (optional) The default maximum number of passages + that can be taken from a single document as the result of a passage query. + """ + + def __init__( + self, + *, + enabled: Optional[bool] = None, + count: Optional[int] = None, + fields: Optional[List[str]] = None, + characters: Optional[int] = None, + per_document: Optional[bool] = None, + max_per_document: Optional[int] = None, + ) -> None: + """ + Initialize a DefaultQueryParamsPassages object. + + :param bool enabled: (optional) When `true`, a passage search is performed + by default. + :param int count: (optional) The number of passages to return. + :param List[str] fields: (optional) An array of field names to perform the + passage search on. + :param int characters: (optional) The approximate number of characters that + each returned passage will contain. + :param bool per_document: (optional) When `true` the number of passages + that can be returned from a single document is restricted to the + *max_per_document* value. + :param int max_per_document: (optional) The default maximum number of + passages that can be taken from a single document as the result of a + passage query. 
+ """ + self.enabled = enabled + self.count = count + self.fields = fields + self.characters = characters + self.per_document = per_document + self.max_per_document = max_per_document + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DefaultQueryParamsPassages': + """Initialize a DefaultQueryParamsPassages object from a json dictionary.""" + args = {} + if (enabled := _dict.get('enabled')) is not None: + args['enabled'] = enabled + if (count := _dict.get('count')) is not None: + args['count'] = count + if (fields := _dict.get('fields')) is not None: + args['fields'] = fields + if (characters := _dict.get('characters')) is not None: + args['characters'] = characters + if (per_document := _dict.get('per_document')) is not None: + args['per_document'] = per_document + if (max_per_document := _dict.get('max_per_document')) is not None: + args['max_per_document'] = max_per_document + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DefaultQueryParamsPassages object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enabled') and self.enabled is not None: + _dict['enabled'] = self.enabled + if hasattr(self, 'count') and self.count is not None: + _dict['count'] = self.count + if hasattr(self, 'fields') and self.fields is not None: + _dict['fields'] = self.fields + if hasattr(self, 'characters') and self.characters is not None: + _dict['characters'] = self.characters + if hasattr(self, 'per_document') and self.per_document is not None: + _dict['per_document'] = self.per_document + if hasattr(self, + 'max_per_document') and self.max_per_document is not None: + _dict['max_per_document'] = self.max_per_document + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DefaultQueryParamsPassages object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DefaultQueryParamsPassages') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DefaultQueryParamsPassages') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DefaultQueryParamsSuggestedRefinements: + """ + Object that contains suggested refinement settings. + **Note**: The `suggested_refinements` parameter that identified dynamic facets from + the data is deprecated. + + :param bool enabled: (optional) When `true`, suggested refinements for the query + are returned by default. + :param int count: (optional) The number of suggested refinements to return by + default. + """ + + def __init__( + self, + *, + enabled: Optional[bool] = None, + count: Optional[int] = None, + ) -> None: + """ + Initialize a DefaultQueryParamsSuggestedRefinements object. + + :param bool enabled: (optional) When `true`, suggested refinements for the + query are returned by default. + :param int count: (optional) The number of suggested refinements to return + by default. 
+        """
+        self.enabled = enabled
+        self.count = count
+
+    @classmethod
+    def from_dict(cls, _dict: Dict) -> 'DefaultQueryParamsSuggestedRefinements':
+        """Initialize a DefaultQueryParamsSuggestedRefinements object from a json dictionary."""
+        args = {}
+        if (enabled := _dict.get('enabled')) is not None:
+            args['enabled'] = enabled
+        if (count := _dict.get('count')) is not None:
+            args['count'] = count
+        return cls(**args)
+
+    @classmethod
+    def _from_dict(cls, _dict):
+        """Initialize a DefaultQueryParamsSuggestedRefinements object from a json dictionary."""
+        return cls.from_dict(_dict)
+
+    def to_dict(self) -> Dict:
+        """Return a json dictionary representing this model."""
+        _dict = {}
+        if hasattr(self, 'enabled') and self.enabled is not None:
+            _dict['enabled'] = self.enabled
+        if hasattr(self, 'count') and self.count is not None:
+            _dict['count'] = self.count
+        return _dict
+
+    def _to_dict(self):
+        """Return a json dictionary representing this model."""
+        return self.to_dict()
+
+    def __str__(self) -> str:
+        """Return a `str` version of this DefaultQueryParamsSuggestedRefinements object."""
+        return json.dumps(self.to_dict(), indent=2)
+
+    def __eq__(self, other: 'DefaultQueryParamsSuggestedRefinements') -> bool:
+        """Return `true` when self and other are equal, false otherwise."""
+        if not isinstance(other, self.__class__):
+            return False
+        return self.__dict__ == other.__dict__
+
+    def __ne__(self, other: 'DefaultQueryParamsSuggestedRefinements') -> bool:
+        """Return `true` when self and other are not equal, false otherwise."""
+        return not self == other
+
+
+class DefaultQueryParamsTableResults:
+    """
+    Default project query settings for table results.
+
+    :param bool enabled: (optional) When `true`, table results for the query are
+    returned by default.
+    :param int count: (optional) The number of table results to return by default.
+    :param int per_document: (optional) The number of table results to include in
+    each result document.
+    """
+
+    def __init__(
+        self,
+        *,
+        enabled: Optional[bool] = None,
+        count: Optional[int] = None,
+        per_document: Optional[int] = None,
+    ) -> None:
+        """
+        Initialize a DefaultQueryParamsTableResults object.
+
+        :param bool enabled: (optional) When `true`, table results for the query
+         are returned by default.
+        :param int count: (optional) The number of table results to return by
+         default.
+        :param int per_document: (optional) The number of table results to include
+         in each result document.
+ """ + self.enabled = enabled + self.count = count + self.per_document = per_document + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DefaultQueryParamsTableResults': + """Initialize a DefaultQueryParamsTableResults object from a json dictionary.""" + args = {} + if (enabled := _dict.get('enabled')) is not None: + args['enabled'] = enabled + if (count := _dict.get('count')) is not None: + args['count'] = count + if (per_document := _dict.get('per_document')) is not None: + args['per_document'] = per_document + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DefaultQueryParamsTableResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enabled') and self.enabled is not None: + _dict['enabled'] = self.enabled + if hasattr(self, 'count') and self.count is not None: + _dict['count'] = self.count + if hasattr(self, 'per_document') and self.per_document is not None: + _dict['per_document'] = self.per_document + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DefaultQueryParamsTableResults object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DefaultQueryParamsTableResults') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DefaultQueryParamsTableResults') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DeleteDocumentResponse: + """ + Information returned when a document is deleted. + + :param str document_id: (optional) The unique identifier of the document. + :param str status: (optional) Status of the document. A deleted document has the + status deleted. + """ + + def __init__( + self, + *, + document_id: Optional[str] = None, + status: Optional[str] = None, + ) -> None: + """ + Initialize a DeleteDocumentResponse object. + + :param str document_id: (optional) The unique identifier of the document. + :param str status: (optional) Status of the document. A deleted document + has the status deleted. 
+ """ + self.document_id = document_id + self.status = status + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DeleteDocumentResponse': + """Initialize a DeleteDocumentResponse object from a json dictionary.""" + args = {} + if (document_id := _dict.get('document_id')) is not None: + args['document_id'] = document_id + if (status := _dict.get('status')) is not None: + args['status'] = status + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DeleteDocumentResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'document_id') and self.document_id is not None: + _dict['document_id'] = self.document_id + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DeleteDocumentResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DeleteDocumentResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DeleteDocumentResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + Status of the document. A deleted document has the status deleted. + """ + + DELETED = 'deleted' + + +class DocumentAccepted: + """ + Information returned after an uploaded document is accepted. + + :param str document_id: (optional) The unique identifier of the ingested + document. + :param str status: (optional) Status of the document in the ingestion process. A + status of `processing` is returned for documents that are ingested with a + *version* date before `2019-01-01`. The `pending` status is returned for all + others. + """ + + def __init__( + self, + *, + document_id: Optional[str] = None, + status: Optional[str] = None, + ) -> None: + """ + Initialize a DocumentAccepted object. + + :param str document_id: (optional) The unique identifier of the ingested + document. + :param str status: (optional) Status of the document in the ingestion + process. A status of `processing` is returned for documents that are + ingested with a *version* date before `2019-01-01`. The `pending` status is + returned for all others. 
+ """ + self.document_id = document_id + self.status = status + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DocumentAccepted': + """Initialize a DocumentAccepted object from a json dictionary.""" + args = {} + if (document_id := _dict.get('document_id')) is not None: + args['document_id'] = document_id + if (status := _dict.get('status')) is not None: + args['status'] = status + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DocumentAccepted object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'document_id') and self.document_id is not None: + _dict['document_id'] = self.document_id + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DocumentAccepted object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DocumentAccepted') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DocumentAccepted') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + Status of the document in the ingestion process. A status of `processing` is + returned for documents that are ingested with a *version* date before + `2019-01-01`. The `pending` status is returned for all others. + """ + + PROCESSING = 'processing' + PENDING = 'pending' + + +class DocumentAttribute: + """ + List of document attributes. + + :param str type: (optional) The type of attribute. + :param str text: (optional) The text associated with the attribute. + :param TableElementLocation location: (optional) The numeric location of the + identified element in the document, represented with two integers labeled + `begin` and `end`. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + text: Optional[str] = None, + location: Optional['TableElementLocation'] = None, + ) -> None: + """ + Initialize a DocumentAttribute object. + + :param str type: (optional) The type of attribute. + :param str text: (optional) The text associated with the attribute. + :param TableElementLocation location: (optional) The numeric location of + the identified element in the document, represented with two integers + labeled `begin` and `end`. 
+ """ + self.type = type + self.text = text + self.location = location + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DocumentAttribute': + """Initialize a DocumentAttribute object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (text := _dict.get('text')) is not None: + args['text'] = text + if (location := _dict.get('location')) is not None: + args['location'] = TableElementLocation.from_dict(location) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DocumentAttribute object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'location') and self.location is not None: + if isinstance(self.location, dict): + _dict['location'] = self.location + else: + _dict['location'] = self.location.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DocumentAttribute object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DocumentAttribute') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DocumentAttribute') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DocumentClassifier: + """ + Information about a document classifier. + + :param str classifier_id: (optional) The Universally Unique Identifier (UUID) of + the document classifier. + :param str name: A human-readable name of the document classifier. + :param str description: (optional) A description of the document classifier. + :param datetime created: (optional) The date that the document classifier was + created. + :param str language: (optional) The language of the training data that is + associated with the document classifier. Language is specified by using the ISO + 639-1 language code, such as `en` for English or `ja` for Japanese. + :param List[DocumentClassifierEnrichment] enrichments: (optional) An array of + enrichments to apply to the data that is used to train and test the document + classifier. The output from the enrichments is used as features by the + classifier to classify the document content both during training and at run + time. + :param List[str] recognized_fields: (optional) An array of fields that are used + to train the document classifier. The same set of fields must exist in the + training data, the test data, and the documents where the resulting document + classifier enrichment is applied at run time. + :param str answer_field: (optional) The name of the field from the training and + test data that contains the classification labels. + :param str training_data_file: (optional) Name of the CSV file with training + data that is used to train the document classifier. + :param str test_data_file: (optional) Name of the CSV file with data that is + used to test the document classifier. If no test data is provided, a subset of + the training data is used for testing purposes. 
+ :param ClassifierFederatedModel federated_classification: (optional) An object + with details for creating federated document classifier models. + """ + + def __init__( + self, + name: str, + *, + classifier_id: Optional[str] = None, + description: Optional[str] = None, + created: Optional[datetime] = None, + language: Optional[str] = None, + enrichments: Optional[List['DocumentClassifierEnrichment']] = None, + recognized_fields: Optional[List[str]] = None, + answer_field: Optional[str] = None, + training_data_file: Optional[str] = None, + test_data_file: Optional[str] = None, + federated_classification: Optional['ClassifierFederatedModel'] = None, + ) -> None: + """ + Initialize a DocumentClassifier object. + + :param str name: A human-readable name of the document classifier. + :param str description: (optional) A description of the document + classifier. + :param str language: (optional) The language of the training data that is + associated with the document classifier. Language is specified by using the + ISO 639-1 language code, such as `en` for English or `ja` for Japanese. + :param List[DocumentClassifierEnrichment] enrichments: (optional) An array + of enrichments to apply to the data that is used to train and test the + document classifier. The output from the enrichments is used as features by + the classifier to classify the document content both during training and at + run time. + :param List[str] recognized_fields: (optional) An array of fields that are + used to train the document classifier. The same set of fields must exist in + the training data, the test data, and the documents where the resulting + document classifier enrichment is applied at run time. + :param str answer_field: (optional) The name of the field from the training + and test data that contains the classification labels. + :param str training_data_file: (optional) Name of the CSV file with + training data that is used to train the document classifier. + :param str test_data_file: (optional) Name of the CSV file with data that + is used to test the document classifier. If no test data is provided, a + subset of the training data is used for testing purposes. + :param ClassifierFederatedModel federated_classification: (optional) An + object with details for creating federated document classifier models. 
+ """ + self.classifier_id = classifier_id + self.name = name + self.description = description + self.created = created + self.language = language + self.enrichments = enrichments + self.recognized_fields = recognized_fields + self.answer_field = answer_field + self.training_data_file = training_data_file + self.test_data_file = test_data_file + self.federated_classification = federated_classification + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DocumentClassifier': + """Initialize a DocumentClassifier object from a json dictionary.""" + args = {} + if (classifier_id := _dict.get('classifier_id')) is not None: + args['classifier_id'] = classifier_id + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in DocumentClassifier JSON' + ) + if (description := _dict.get('description')) is not None: + args['description'] = description + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (language := _dict.get('language')) is not None: + args['language'] = language + if (enrichments := _dict.get('enrichments')) is not None: + args['enrichments'] = [ + DocumentClassifierEnrichment.from_dict(v) for v in enrichments + ] + if (recognized_fields := _dict.get('recognized_fields')) is not None: + args['recognized_fields'] = recognized_fields + if (answer_field := _dict.get('answer_field')) is not None: + args['answer_field'] = answer_field + if (training_data_file := _dict.get('training_data_file')) is not None: + args['training_data_file'] = training_data_file + if (test_data_file := _dict.get('test_data_file')) is not None: + args['test_data_file'] = test_data_file + if (federated_classification := + _dict.get('federated_classification')) is not None: + args[ + 'federated_classification'] = ClassifierFederatedModel.from_dict( + federated_classification) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DocumentClassifier object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'classifier_id') and getattr( + self, 'classifier_id') is not None: + _dict['classifier_id'] = getattr(self, 'classifier_id') + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'enrichments') and self.enrichments is not None: + enrichments_list = [] + for v in self.enrichments: + if isinstance(v, dict): + enrichments_list.append(v) + else: + enrichments_list.append(v.to_dict()) + _dict['enrichments'] = enrichments_list + if hasattr(self, + 'recognized_fields') and self.recognized_fields is not None: + _dict['recognized_fields'] = self.recognized_fields + if hasattr(self, 'answer_field') and self.answer_field is not None: + _dict['answer_field'] = self.answer_field + if hasattr( + self, + 'training_data_file') and self.training_data_file is not None: + _dict['training_data_file'] = self.training_data_file + if hasattr(self, 'test_data_file') and self.test_data_file is not None: + _dict['test_data_file'] = self.test_data_file + if 
hasattr(self, 'federated_classification' + ) and self.federated_classification is not None: + if isinstance(self.federated_classification, dict): + _dict[ + 'federated_classification'] = self.federated_classification + else: + _dict[ + 'federated_classification'] = self.federated_classification.to_dict( + ) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DocumentClassifier object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DocumentClassifier') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DocumentClassifier') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DocumentClassifierEnrichment: + """ + An object that describes enrichments that are applied to the training and test data + that is used by the document classifier. + + :param str enrichment_id: The Universally Unique Identifier (UUID) of the + enrichment. + :param List[str] fields: An array of field names where the enrichment is + applied. + """ + + def __init__( + self, + enrichment_id: str, + fields: List[str], + ) -> None: + """ + Initialize a DocumentClassifierEnrichment object. + + :param str enrichment_id: The Universally Unique Identifier (UUID) of the + enrichment. + :param List[str] fields: An array of field names where the enrichment is + applied. + """ + self.enrichment_id = enrichment_id + self.fields = fields + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DocumentClassifierEnrichment': + """Initialize a DocumentClassifierEnrichment object from a json dictionary.""" + args = {} + if (enrichment_id := _dict.get('enrichment_id')) is not None: + args['enrichment_id'] = enrichment_id + else: + raise ValueError( + 'Required property \'enrichment_id\' not present in DocumentClassifierEnrichment JSON' + ) + if (fields := _dict.get('fields')) is not None: + args['fields'] = fields + else: + raise ValueError( + 'Required property \'fields\' not present in DocumentClassifierEnrichment JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DocumentClassifierEnrichment object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enrichment_id') and self.enrichment_id is not None: + _dict['enrichment_id'] = self.enrichment_id + if hasattr(self, 'fields') and self.fields is not None: + _dict['fields'] = self.fields + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DocumentClassifierEnrichment object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DocumentClassifierEnrichment') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DocumentClassifierEnrichment') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DocumentClassifierModel: + """ + Information about a document classifier 
model. + + :param str model_id: (optional) The Universally Unique Identifier (UUID) of the + document classifier model. + :param str name: A human-readable name of the document classifier model. + :param str description: (optional) A description of the document classifier + model. + :param datetime created: (optional) The date that the document classifier model + was created. + :param datetime updated: (optional) The date that the document classifier model + was last updated. + :param str training_data_file: (optional) Name of the CSV file that contains the + training data that is used to train the document classifier model. + :param str test_data_file: (optional) Name of the CSV file that contains data + that is used to test the document classifier model. If no test data is provided, + a subset of the training data is used for testing purposes. + :param str status: (optional) The status of the training run. + :param ClassifierModelEvaluation evaluation: (optional) An object that contains + information about a trained document classifier model. + :param str enrichment_id: (optional) The Universally Unique Identifier (UUID) of + the enrichment that is generated by this document classifier model. + :param datetime deployed_at: (optional) The date that the document classifier + model was deployed. + """ + + def __init__( + self, + name: str, + *, + model_id: Optional[str] = None, + description: Optional[str] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + training_data_file: Optional[str] = None, + test_data_file: Optional[str] = None, + status: Optional[str] = None, + evaluation: Optional['ClassifierModelEvaluation'] = None, + enrichment_id: Optional[str] = None, + deployed_at: Optional[datetime] = None, + ) -> None: + """ + Initialize a DocumentClassifierModel object. + + :param str name: A human-readable name of the document classifier model. + :param str description: (optional) A description of the document classifier + model. + :param str training_data_file: (optional) Name of the CSV file that + contains the training data that is used to train the document classifier + model. + :param str test_data_file: (optional) Name of the CSV file that contains + data that is used to test the document classifier model. If no test data is + provided, a subset of the training data is used for testing purposes. + :param str status: (optional) The status of the training run. + :param ClassifierModelEvaluation evaluation: (optional) An object that + contains information about a trained document classifier model. + :param str enrichment_id: (optional) The Universally Unique Identifier + (UUID) of the enrichment that is generated by this document classifier + model. 
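For orientation, a small sketch (with made-up values) of how a trained-model record returned by the service could be deserialized with `from_dict` and its training status inspected:

raw = {
    'model_id': 'c7b1a2d4-0000-0000-0000-000000000000',  # hypothetical UUID
    'name': 'news-classifier-model',
    'status': 'training',
    'created': '2023-03-01T12:00:00.000Z',
}
model = DocumentClassifierModel.from_dict(raw)
if model.status == DocumentClassifierModel.StatusEnum.AVAILABLE.value:
    print('model is ready:', model.model_id)
else:
    print('current status:', model.status)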
+ """ + self.model_id = model_id + self.name = name + self.description = description + self.created = created + self.updated = updated + self.training_data_file = training_data_file + self.test_data_file = test_data_file + self.status = status + self.evaluation = evaluation + self.enrichment_id = enrichment_id + self.deployed_at = deployed_at + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DocumentClassifierModel': + """Initialize a DocumentClassifierModel object from a json dictionary.""" + args = {} + if (model_id := _dict.get('model_id')) is not None: + args['model_id'] = model_id + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in DocumentClassifierModel JSON' + ) + if (description := _dict.get('description')) is not None: + args['description'] = description + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + if (training_data_file := _dict.get('training_data_file')) is not None: + args['training_data_file'] = training_data_file + if (test_data_file := _dict.get('test_data_file')) is not None: + args['test_data_file'] = test_data_file + if (status := _dict.get('status')) is not None: + args['status'] = status + if (evaluation := _dict.get('evaluation')) is not None: + args['evaluation'] = ClassifierModelEvaluation.from_dict(evaluation) + if (enrichment_id := _dict.get('enrichment_id')) is not None: + args['enrichment_id'] = enrichment_id + if (deployed_at := _dict.get('deployed_at')) is not None: + args['deployed_at'] = string_to_datetime(deployed_at) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DocumentClassifierModel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'model_id') and getattr(self, 'model_id') is not None: + _dict['model_id'] = getattr(self, 'model_id') + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + if hasattr( + self, + 'training_data_file') and self.training_data_file is not None: + _dict['training_data_file'] = self.training_data_file + if hasattr(self, 'test_data_file') and self.test_data_file is not None: + _dict['test_data_file'] = self.test_data_file + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'evaluation') and self.evaluation is not None: + if isinstance(self.evaluation, dict): + _dict['evaluation'] = self.evaluation + else: + _dict['evaluation'] = self.evaluation.to_dict() + if hasattr(self, 'enrichment_id') and self.enrichment_id is not None: + _dict['enrichment_id'] = self.enrichment_id + if hasattr(self, 'deployed_at') and getattr(self, + 'deployed_at') is not None: + _dict['deployed_at'] = datetime_to_string( + getattr(self, 'deployed_at')) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + 
def __str__(self) -> str: + """Return a `str` version of this DocumentClassifierModel object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DocumentClassifierModel') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DocumentClassifierModel') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The status of the training run. + """ + + TRAINING = 'training' + AVAILABLE = 'available' + FAILED = 'failed' + + +class DocumentClassifierModels: + """ + An object that contains a list of document classifier model definitions. + + :param List[DocumentClassifierModel] models: (optional) An array of document + classifier model definitions. + """ + + def __init__( + self, + *, + models: Optional[List['DocumentClassifierModel']] = None, + ) -> None: + """ + Initialize a DocumentClassifierModels object. + + :param List[DocumentClassifierModel] models: (optional) An array of + document classifier model definitions. + """ + self.models = models + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DocumentClassifierModels': + """Initialize a DocumentClassifierModels object from a json dictionary.""" + args = {} + if (models := _dict.get('models')) is not None: + args['models'] = [ + DocumentClassifierModel.from_dict(v) for v in models + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DocumentClassifierModels object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'models') and self.models is not None: + models_list = [] + for v in self.models: + if isinstance(v, dict): + models_list.append(v) + else: + models_list.append(v.to_dict()) + _dict['models'] = models_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DocumentClassifierModels object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DocumentClassifierModels') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DocumentClassifierModels') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DocumentClassifiers: + """ + An object that contains a list of document classifier definitions. + + :param List[DocumentClassifier] classifiers: (optional) An array of document + classifier definitions. + """ + + def __init__( + self, + *, + classifiers: Optional[List['DocumentClassifier']] = None, + ) -> None: + """ + Initialize a DocumentClassifiers object. + + :param List[DocumentClassifier] classifiers: (optional) An array of + document classifier definitions. 
+ """ + self.classifiers = classifiers + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DocumentClassifiers': + """Initialize a DocumentClassifiers object from a json dictionary.""" + args = {} + if (classifiers := _dict.get('classifiers')) is not None: + args['classifiers'] = [ + DocumentClassifier.from_dict(v) for v in classifiers + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DocumentClassifiers object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'classifiers') and self.classifiers is not None: + classifiers_list = [] + for v in self.classifiers: + if isinstance(v, dict): + classifiers_list.append(v) + else: + classifiers_list.append(v.to_dict()) + _dict['classifiers'] = classifiers_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DocumentClassifiers object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DocumentClassifiers') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DocumentClassifiers') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class DocumentDetails: + """ + Information about a document. + + :param str document_id: (optional) The unique identifier of the document. + :param datetime created: (optional) Date and time that the document is added to + the collection. For a child document, the date and time when the process that + generates the child document runs. The date-time format is + `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. + :param datetime updated: (optional) Date and time that the document is finished + being processed and is indexed. This date changes whenever the document is + reprocessed, including for enrichment changes. The date-time format is + `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. + :param str status: (optional) The status of the ingestion of the document. The + possible values are: + * `available`: Ingestion is finished and the document is indexed. + * `failed`: Ingestion is finished, but the document is not indexed because of an + error. + * `pending`: The document is uploaded, but the ingestion process is not started. + * `processing`: Ingestion is in progress. + :param List[Notice] notices: (optional) Array of JSON objects for notices, + meaning warning or error messages, that are produced by the document ingestion + process. The array does not include notices that are produced for child + documents that are generated when a document is processed. + :param DocumentDetailsChildren children: (optional) Information about the child + documents that are generated from a single document during ingestion or other + processing. + :param str filename: (optional) Name of the original source file (if available). + :param str file_type: (optional) The type of the original source file, such as + `csv`, `excel`, `html`, `json`, `pdf`, `text`, `word`, and so on. + :param str sha256: (optional) The SHA-256 hash of the original source file. The + hash is formatted as a hexadecimal string. 
+ """ + + def __init__( + self, + *, + document_id: Optional[str] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + status: Optional[str] = None, + notices: Optional[List['Notice']] = None, + children: Optional['DocumentDetailsChildren'] = None, + filename: Optional[str] = None, + file_type: Optional[str] = None, + sha256: Optional[str] = None, + ) -> None: + """ + Initialize a DocumentDetails object. + + :param str status: (optional) The status of the ingestion of the document. + The possible values are: + * `available`: Ingestion is finished and the document is indexed. + * `failed`: Ingestion is finished, but the document is not indexed because + of an error. + * `pending`: The document is uploaded, but the ingestion process is not + started. + * `processing`: Ingestion is in progress. + :param List[Notice] notices: (optional) Array of JSON objects for notices, + meaning warning or error messages, that are produced by the document + ingestion process. The array does not include notices that are produced for + child documents that are generated when a document is processed. + :param DocumentDetailsChildren children: (optional) Information about the + child documents that are generated from a single document during ingestion + or other processing. + :param str filename: (optional) Name of the original source file (if + available). + :param str file_type: (optional) The type of the original source file, such + as `csv`, `excel`, `html`, `json`, `pdf`, `text`, `word`, and so on. + :param str sha256: (optional) The SHA-256 hash of the original source file. + The hash is formatted as a hexadecimal string. + """ + self.document_id = document_id + self.created = created + self.updated = updated + self.status = status + self.notices = notices + self.children = children + self.filename = filename + self.file_type = file_type + self.sha256 = sha256 + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DocumentDetails': + """Initialize a DocumentDetails object from a json dictionary.""" + args = {} + if (document_id := _dict.get('document_id')) is not None: + args['document_id'] = document_id + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + if (status := _dict.get('status')) is not None: + args['status'] = status + if (notices := _dict.get('notices')) is not None: + args['notices'] = [Notice.from_dict(v) for v in notices] + if (children := _dict.get('children')) is not None: + args['children'] = DocumentDetailsChildren.from_dict(children) + if (filename := _dict.get('filename')) is not None: + args['filename'] = filename + if (file_type := _dict.get('file_type')) is not None: + args['file_type'] = file_type + if (sha256 := _dict.get('sha256')) is not None: + args['sha256'] = sha256 + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DocumentDetails object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'document_id') and getattr(self, + 'document_id') is not None: + _dict['document_id'] = getattr(self, 'document_id') + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = 
datetime_to_string(getattr(self, 'updated')) + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'notices') and self.notices is not None: + notices_list = [] + for v in self.notices: + if isinstance(v, dict): + notices_list.append(v) + else: + notices_list.append(v.to_dict()) + _dict['notices'] = notices_list + if hasattr(self, 'children') and self.children is not None: + if isinstance(self.children, dict): + _dict['children'] = self.children + else: + _dict['children'] = self.children.to_dict() + if hasattr(self, 'filename') and self.filename is not None: + _dict['filename'] = self.filename + if hasattr(self, 'file_type') and self.file_type is not None: + _dict['file_type'] = self.file_type + if hasattr(self, 'sha256') and self.sha256 is not None: + _dict['sha256'] = self.sha256 + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DocumentDetails object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DocumentDetails') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DocumentDetails') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The status of the ingestion of the document. The possible values are: + * `available`: Ingestion is finished and the document is indexed. + * `failed`: Ingestion is finished, but the document is not indexed because of an + error. + * `pending`: The document is uploaded, but the ingestion process is not started. + * `processing`: Ingestion is in progress. + """ + + AVAILABLE = 'available' + FAILED = 'failed' + PENDING = 'pending' + PROCESSING = 'processing' + + +class DocumentDetailsChildren: + """ + Information about the child documents that are generated from a single document during + ingestion or other processing. + + :param bool have_notices: (optional) Indicates whether the child documents have + any notices. The value is `false` if the document does not have child documents. + :param int count: (optional) Number of child documents. The value is `0` when + processing of the document doesn't generate any child documents. + """ + + def __init__( + self, + *, + have_notices: Optional[bool] = None, + count: Optional[int] = None, + ) -> None: + """ + Initialize a DocumentDetailsChildren object. + + :param bool have_notices: (optional) Indicates whether the child documents + have any notices. The value is `false` if the document does not have child + documents. + :param int count: (optional) Number of child documents. The value is `0` + when processing of the document doesn't generate any child documents. 
+ """ + self.have_notices = have_notices + self.count = count + + @classmethod + def from_dict(cls, _dict: Dict) -> 'DocumentDetailsChildren': + """Initialize a DocumentDetailsChildren object from a json dictionary.""" + args = {} + if (have_notices := _dict.get('have_notices')) is not None: + args['have_notices'] = have_notices + if (count := _dict.get('count')) is not None: + args['count'] = count + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DocumentDetailsChildren object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'have_notices') and self.have_notices is not None: + _dict['have_notices'] = self.have_notices + if hasattr(self, 'count') and self.count is not None: + _dict['count'] = self.count + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this DocumentDetailsChildren object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'DocumentDetailsChildren') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'DocumentDetailsChildren') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Enrichment: + """ + Information about a specific enrichment. + + :param str enrichment_id: (optional) The Universally Unique Identifier (UUID) of + this enrichment. + :param str name: (optional) The human readable name for this enrichment. + :param str description: (optional) The description of this enrichment. + :param str type: (optional) The type of this enrichment. + :param EnrichmentOptions options: (optional) An object that contains options for + the current enrichment. Starting with version `2020-08-30`, the enrichment + options are not included in responses from the List Enrichments method. + """ + + def __init__( + self, + *, + enrichment_id: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + type: Optional[str] = None, + options: Optional['EnrichmentOptions'] = None, + ) -> None: + """ + Initialize a Enrichment object. + + :param str name: (optional) The human readable name for this enrichment. + :param str description: (optional) The description of this enrichment. + :param str type: (optional) The type of this enrichment. + :param EnrichmentOptions options: (optional) An object that contains + options for the current enrichment. Starting with version `2020-08-30`, the + enrichment options are not included in responses from the List Enrichments + method. 
+ """ + self.enrichment_id = enrichment_id + self.name = name + self.description = description + self.type = type + self.options = options + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Enrichment': + """Initialize a Enrichment object from a json dictionary.""" + args = {} + if (enrichment_id := _dict.get('enrichment_id')) is not None: + args['enrichment_id'] = enrichment_id + if (name := _dict.get('name')) is not None: + args['name'] = name + if (description := _dict.get('description')) is not None: + args['description'] = description + if (type := _dict.get('type')) is not None: + args['type'] = type + if (options := _dict.get('options')) is not None: + args['options'] = EnrichmentOptions.from_dict(options) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Enrichment object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enrichment_id') and getattr( + self, 'enrichment_id') is not None: + _dict['enrichment_id'] = getattr(self, 'enrichment_id') + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'options') and self.options is not None: + if isinstance(self.options, dict): + _dict['options'] = self.options + else: + _dict['options'] = self.options.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Enrichment object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Enrichment') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Enrichment') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of this enrichment. + """ + + PART_OF_SPEECH = 'part_of_speech' + SENTIMENT = 'sentiment' + NATURAL_LANGUAGE_UNDERSTANDING = 'natural_language_understanding' + DICTIONARY = 'dictionary' + REGULAR_EXPRESSION = 'regular_expression' + UIMA_ANNOTATOR = 'uima_annotator' + RULE_BASED = 'rule_based' + WATSON_KNOWLEDGE_STUDIO_MODEL = 'watson_knowledge_studio_model' + CLASSIFIER = 'classifier' + WEBHOOK = 'webhook' + SENTENCE_CLASSIFIER = 'sentence_classifier' + + +class EnrichmentOptions: + """ + An object that contains options for the current enrichment. Starting with version + `2020-08-30`, the enrichment options are not included in responses from the List + Enrichments method. + + :param List[str] languages: (optional) An array of supported languages for this + enrichment. When creating an enrichment, only specify a language that is used by + the model or in the dictionary. Required when **type** is `dictionary`. Optional + when **type** is `rule_based`. Not valid when creating any other type of + enrichment. + :param str entity_type: (optional) The name of the entity type. This value is + used as the field name in the index. Required when **type** is `dictionary` or + `regular_expression`. Not valid when creating any other type of enrichment. 
+ :param str regular_expression: (optional) The regular expression to apply for + this enrichment. Required when **type** is `regular_expression`. Not valid when + creating any other type of enrichment. + :param str result_field: (optional) The name of the result document field that + this enrichment creates. Required when **type** is `rule_based` or `classifier`. + Not valid when creating any other type of enrichment. + :param str classifier_id: (optional) The Universally Unique Identifier (UUID) of + the document classifier. Required when **type** is `classifier`. Not valid when + creating any other type of enrichment. + :param str model_id: (optional) The Universally Unique Identifier (UUID) of the + document classifier model. Required when **type** is `classifier`. Not valid + when creating any other type of enrichment. + :param float confidence_threshold: (optional) Specifies a threshold. Only + classes with evaluation confidence scores that are higher than the specified + threshold are included in the output. Optional when **type** is `classifier`. + Not valid when creating any other type of enrichment. + :param int top_k: (optional) Evaluates only the classes that fall in the top set + of results when ranked by confidence. For example, if set to `5`, then the top + five classes for each document are evaluated. If set to 0, the + **confidence_threshold** is used to determine the predicted classes. Optional + when **type** is `classifier`. Not valid when creating any other type of + enrichment. + :param str url: (optional) A URL that uses the SSL protocol (begins with https) + for the webhook. Required when type is `webhook`. Not valid when creating any + other type of enrichment. + :param str version: (optional) The Discovery API version that allows to + distinguish the schema. The version is specified in the `yyyy-mm-dd` format. + Optional when `type` is `webhook`. Not valid when creating any other type of + enrichment. + :param str secret: (optional) A private key can be included in the request to + authenticate with the external service. The maximum length is 1,024 characters. + Optional when `type` is `webhook`. Not valid when creating any other type of + enrichment. + :param WebhookHeader headers_: (optional) An array of headers to pass with the + HTTP request. Optional when `type` is `webhook`. Not valid when creating any + other type of enrichment. + :param str location_encoding: (optional) Discovery calculates offsets of the + text's location with this encoding type in documents. Use the same location + encoding type in both Discovery and external enrichment for a document. + These encoding types are supported: `utf-8`, `utf-16`, and `utf-32`. Optional + when `type` is `webhook`. Not valid when creating any other type of enrichment. + """ + + def __init__( + self, + *, + languages: Optional[List[str]] = None, + entity_type: Optional[str] = None, + regular_expression: Optional[str] = None, + result_field: Optional[str] = None, + classifier_id: Optional[str] = None, + model_id: Optional[str] = None, + confidence_threshold: Optional[float] = None, + top_k: Optional[int] = None, + url: Optional[str] = None, + version: Optional[str] = None, + secret: Optional[str] = None, + headers_: Optional['WebhookHeader'] = None, + location_encoding: Optional[str] = None, + ) -> None: + """ + Initialize a EnrichmentOptions object. + + :param List[str] languages: (optional) An array of supported languages for + this enrichment. 
When creating an enrichment, only specify a language that + is used by the model or in the dictionary. Required when **type** is + `dictionary`. Optional when **type** is `rule_based`. Not valid when + creating any other type of enrichment. + :param str entity_type: (optional) The name of the entity type. This value + is used as the field name in the index. Required when **type** is + `dictionary` or `regular_expression`. Not valid when creating any other + type of enrichment. + :param str regular_expression: (optional) The regular expression to apply + for this enrichment. Required when **type** is `regular_expression`. Not + valid when creating any other type of enrichment. + :param str result_field: (optional) The name of the result document field + that this enrichment creates. Required when **type** is `rule_based` or + `classifier`. Not valid when creating any other type of enrichment. + :param str classifier_id: (optional) The Universally Unique Identifier + (UUID) of the document classifier. Required when **type** is `classifier`. + Not valid when creating any other type of enrichment. + :param str model_id: (optional) The Universally Unique Identifier (UUID) of + the document classifier model. Required when **type** is `classifier`. Not + valid when creating any other type of enrichment. + :param float confidence_threshold: (optional) Specifies a threshold. Only + classes with evaluation confidence scores that are higher than the + specified threshold are included in the output. Optional when **type** is + `classifier`. Not valid when creating any other type of enrichment. + :param int top_k: (optional) Evaluates only the classes that fall in the + top set of results when ranked by confidence. For example, if set to `5`, + then the top five classes for each document are evaluated. If set to 0, the + **confidence_threshold** is used to determine the predicted classes. + Optional when **type** is `classifier`. Not valid when creating any other + type of enrichment. + :param str url: (optional) A URL that uses the SSL protocol (begins with + https) for the webhook. Required when type is `webhook`. Not valid when + creating any other type of enrichment. + :param str version: (optional) The Discovery API version that allows to + distinguish the schema. The version is specified in the `yyyy-mm-dd` + format. Optional when `type` is `webhook`. Not valid when creating any + other type of enrichment. + :param str secret: (optional) A private key can be included in the request + to authenticate with the external service. The maximum length is 1,024 + characters. Optional when `type` is `webhook`. Not valid when creating any + other type of enrichment. + :param WebhookHeader headers_: (optional) An array of headers to pass with + the HTTP request. Optional when `type` is `webhook`. Not valid when + creating any other type of enrichment. + :param str location_encoding: (optional) Discovery calculates offsets of + the text's location with this encoding type in documents. Use the same + location encoding type in both Discovery and external enrichment for a + document. + These encoding types are supported: `utf-8`, `utf-16`, and `utf-32`. + Optional when `type` is `webhook`. Not valid when creating any other type + of enrichment. 
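To make the coupling between the enrichment **type** and these options concrete, here is a minimal sketch of a dictionary-type enrichment definition; the names are hypothetical and only the options relevant to `dictionary` are set:

options = EnrichmentOptions(
    languages=['en'],            # required when type is 'dictionary'
    entity_type='product_name',  # used as the field name in the index
)
enrichment = Enrichment(
    name='product dictionary',
    type=Enrichment.TypeEnum.DICTIONARY.value,
    options=options,
)
print(enrichment.to_dict())      # options are serialized via EnrichmentOptions.to_dict()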
+ """ + self.languages = languages + self.entity_type = entity_type + self.regular_expression = regular_expression + self.result_field = result_field + self.classifier_id = classifier_id + self.model_id = model_id + self.confidence_threshold = confidence_threshold + self.top_k = top_k + self.url = url + self.version = version + self.secret = secret + self.headers_ = headers_ + self.location_encoding = location_encoding + + @classmethod + def from_dict(cls, _dict: Dict) -> 'EnrichmentOptions': + """Initialize a EnrichmentOptions object from a json dictionary.""" + args = {} + if (languages := _dict.get('languages')) is not None: + args['languages'] = languages + if (entity_type := _dict.get('entity_type')) is not None: + args['entity_type'] = entity_type + if (regular_expression := _dict.get('regular_expression')) is not None: + args['regular_expression'] = regular_expression + if (result_field := _dict.get('result_field')) is not None: + args['result_field'] = result_field + if (classifier_id := _dict.get('classifier_id')) is not None: + args['classifier_id'] = classifier_id + if (model_id := _dict.get('model_id')) is not None: + args['model_id'] = model_id + if (confidence_threshold := + _dict.get('confidence_threshold')) is not None: + args['confidence_threshold'] = confidence_threshold + if (top_k := _dict.get('top_k')) is not None: + args['top_k'] = top_k + if (url := _dict.get('url')) is not None: + args['url'] = url + if (version := _dict.get('version')) is not None: + args['version'] = version + if (secret := _dict.get('secret')) is not None: + args['secret'] = secret + if (headers_ := _dict.get('headers')) is not None: + args['headers_'] = WebhookHeader.from_dict(headers_) + if (location_encoding := _dict.get('location_encoding')) is not None: + args['location_encoding'] = location_encoding + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a EnrichmentOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'languages') and self.languages is not None: + _dict['languages'] = self.languages + if hasattr(self, 'entity_type') and self.entity_type is not None: + _dict['entity_type'] = self.entity_type + if hasattr( + self, + 'regular_expression') and self.regular_expression is not None: + _dict['regular_expression'] = self.regular_expression + if hasattr(self, 'result_field') and self.result_field is not None: + _dict['result_field'] = self.result_field + if hasattr(self, 'classifier_id') and self.classifier_id is not None: + _dict['classifier_id'] = self.classifier_id + if hasattr(self, 'model_id') and self.model_id is not None: + _dict['model_id'] = self.model_id + if hasattr(self, 'confidence_threshold' + ) and self.confidence_threshold is not None: + _dict['confidence_threshold'] = self.confidence_threshold + if hasattr(self, 'top_k') and self.top_k is not None: + _dict['top_k'] = self.top_k + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + if hasattr(self, 'version') and self.version is not None: + _dict['version'] = self.version + if hasattr(self, 'secret') and self.secret is not None: + _dict['secret'] = self.secret + if hasattr(self, 'headers_') and self.headers_ is not None: + if isinstance(self.headers_, dict): + _dict['headers'] = self.headers_ + else: + _dict['headers'] = self.headers_.to_dict() + if hasattr(self, + 'location_encoding') and self.location_encoding is not None: + 
_dict['location_encoding'] = self.location_encoding + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this EnrichmentOptions object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'EnrichmentOptions') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'EnrichmentOptions') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Enrichments: + """ + An object that contains an array of enrichment definitions. + + :param List[Enrichment] enrichments: (optional) An array of enrichment + definitions. + """ + + def __init__( + self, + *, + enrichments: Optional[List['Enrichment']] = None, + ) -> None: + """ + Initialize a Enrichments object. + + :param List[Enrichment] enrichments: (optional) An array of enrichment + definitions. + """ + self.enrichments = enrichments + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Enrichments': + """Initialize a Enrichments object from a json dictionary.""" + args = {} + if (enrichments := _dict.get('enrichments')) is not None: + args['enrichments'] = [Enrichment.from_dict(v) for v in enrichments] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Enrichments object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enrichments') and self.enrichments is not None: + enrichments_list = [] + for v in self.enrichments: + if isinstance(v, dict): + enrichments_list.append(v) + else: + enrichments_list.append(v.to_dict()) + _dict['enrichments'] = enrichments_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Enrichments object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Enrichments') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Enrichments') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Expansion: + """ + An expansion definition. Each object respresents one set of expandable strings. For + example, you could have expansions for the word `hot` in one object, and expansions + for the word `cold` in another. Follow these guidelines when you add terms: + * Specify the terms in lowercase. Lowercase terms expand to uppercase. + * Multiword terms are supported only in bidirectional expansions. + * Do not specify a term that is specified in the stop words list for the collection. + + :param List[str] input_terms: (optional) A list of terms that will be expanded + for this expansion. If specified, only the items in this list are expanded. + :param List[str] expanded_terms: A list of terms that this expansion will be + expanded to. If specified without **input_terms**, the list also functions as + the input term list. 
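As a sketch of the two expansion styles described here and in the Expansions wrapper documented below (the terms are illustrative):

expansions = Expansions(expansions=[
    # Bidirectional: no input_terms, so every listed term expands to all the others.
    Expansion(expanded_terms=['ibm', 'international business machines', 'big blue']),
    # Unidirectional: 'on premise' in a query is replaced by the expanded terms.
    Expansion(expanded_terms=['on premises', 'on-premises'],
              input_terms=['on premise']),
])
print(expansions.to_dict())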
+ """ + + def __init__( + self, + expanded_terms: List[str], + *, + input_terms: Optional[List[str]] = None, + ) -> None: + """ + Initialize a Expansion object. + + :param List[str] expanded_terms: A list of terms that this expansion will + be expanded to. If specified without **input_terms**, the list also + functions as the input term list. + :param List[str] input_terms: (optional) A list of terms that will be + expanded for this expansion. If specified, only the items in this list are + expanded. + """ + self.input_terms = input_terms + self.expanded_terms = expanded_terms + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Expansion': + """Initialize a Expansion object from a json dictionary.""" + args = {} + if (input_terms := _dict.get('input_terms')) is not None: + args['input_terms'] = input_terms + if (expanded_terms := _dict.get('expanded_terms')) is not None: + args['expanded_terms'] = expanded_terms + else: + raise ValueError( + 'Required property \'expanded_terms\' not present in Expansion JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Expansion object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'input_terms') and self.input_terms is not None: + _dict['input_terms'] = self.input_terms + if hasattr(self, 'expanded_terms') and self.expanded_terms is not None: + _dict['expanded_terms'] = self.expanded_terms + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Expansion object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Expansion') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Expansion') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Expansions: + """ + The query expansion definitions for the specified collection. + + :param List[Expansion] expansions: An array of query expansion definitions. + Each object in the **expansions** array represents a term or set of terms that + will be expanded into other terms. Each expansion object can be configured as + `bidirectional` or `unidirectional`. + * **Bidirectional**: Each entry in the `expanded_terms` list expands to include + all expanded terms. For example, a query for `ibm` expands to `ibm OR + international business machines OR big blue`. + * **Unidirectional**: The terms in `input_terms` in the query are replaced by + the terms in `expanded_terms`. For example, a query for the often misused term + `on premise` is converted to `on premises OR on-premises` and does not contain + the original term. If you want an input term to be included in the query, then + repeat the input term in the expanded terms list. + """ + + def __init__( + self, + expansions: List['Expansion'], + ) -> None: + """ + Initialize a Expansions object. + + :param List[Expansion] expansions: An array of query expansion definitions. + Each object in the **expansions** array represents a term or set of terms + that will be expanded into other terms. Each expansion object can be + configured as `bidirectional` or `unidirectional`. 
+ * **Bidirectional**: Each entry in the `expanded_terms` list expands to + include all expanded terms. For example, a query for `ibm` expands to `ibm + OR international business machines OR big blue`. + * **Unidirectional**: The terms in `input_terms` in the query are replaced + by the terms in `expanded_terms`. For example, a query for the often + misused term `on premise` is converted to `on premises OR on-premises` and + does not contain the original term. If you want an input term to be + included in the query, then repeat the input term in the expanded terms + list. + """ + self.expansions = expansions + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Expansions': + """Initialize a Expansions object from a json dictionary.""" + args = {} + if (expansions := _dict.get('expansions')) is not None: + args['expansions'] = [Expansion.from_dict(v) for v in expansions] + else: + raise ValueError( + 'Required property \'expansions\' not present in Expansions JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Expansions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'expansions') and self.expansions is not None: + expansions_list = [] + for v in self.expansions: + if isinstance(v, dict): + expansions_list.append(v) + else: + expansions_list.append(v.to_dict()) + _dict['expansions'] = expansions_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Expansions object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Expansions') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Expansions') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Field: + """ + Object that contains field details. + + :param str field: (optional) The name of the field. + :param str type: (optional) The type of the field. + :param str collection_id: (optional) The collection Id of the collection where + the field was found. + """ + + def __init__( + self, + *, + field: Optional[str] = None, + type: Optional[str] = None, + collection_id: Optional[str] = None, + ) -> None: + """ + Initialize a Field object. 
+ + """ + self.field = field + self.type = type + self.collection_id = collection_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Field': + """Initialize a Field object from a json dictionary.""" + args = {} + if (field := _dict.get('field')) is not None: + args['field'] = field + if (type := _dict.get('type')) is not None: + args['type'] = type + if (collection_id := _dict.get('collection_id')) is not None: + args['collection_id'] = collection_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Field object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'field') and getattr(self, 'field') is not None: + _dict['field'] = getattr(self, 'field') + if hasattr(self, 'type') and getattr(self, 'type') is not None: + _dict['type'] = getattr(self, 'type') + if hasattr(self, 'collection_id') and getattr( + self, 'collection_id') is not None: + _dict['collection_id'] = getattr(self, 'collection_id') + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Field object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Field') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Field') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of the field. + """ + + NESTED = 'nested' + STRING = 'string' + DATE = 'date' + LONG = 'long' + INTEGER = 'integer' + SHORT = 'short' + BYTE = 'byte' + DOUBLE = 'double' + FLOAT = 'float' + BOOLEAN = 'boolean' + BINARY = 'binary' + + +class ListBatchesResponse: + """ + An object that contains a list of batches that are ready for enrichment by the + external application. + + :param List[BatchDetails] batches: (optional) An array that lists the batches in + a collection. + """ + + def __init__( + self, + *, + batches: Optional[List['BatchDetails']] = None, + ) -> None: + """ + Initialize a ListBatchesResponse object. + + :param List[BatchDetails] batches: (optional) An array that lists the + batches in a collection. 
+ """ + self.batches = batches + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ListBatchesResponse': + """Initialize a ListBatchesResponse object from a json dictionary.""" + args = {} + if (batches := _dict.get('batches')) is not None: + args['batches'] = [BatchDetails.from_dict(v) for v in batches] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ListBatchesResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'batches') and self.batches is not None: + batches_list = [] + for v in self.batches: + if isinstance(v, dict): + batches_list.append(v) + else: + batches_list.append(v.to_dict()) + _dict['batches'] = batches_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ListBatchesResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ListBatchesResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ListBatchesResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ListCollectionsResponse: + """ + Response object that contains an array of collection details. + + :param List[Collection] collections: (optional) An array that contains + information about each collection in the project. + """ + + def __init__( + self, + *, + collections: Optional[List['Collection']] = None, + ) -> None: + """ + Initialize a ListCollectionsResponse object. + + :param List[Collection] collections: (optional) An array that contains + information about each collection in the project. 
+ """ + self.collections = collections + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ListCollectionsResponse': + """Initialize a ListCollectionsResponse object from a json dictionary.""" + args = {} + if (collections := _dict.get('collections')) is not None: + args['collections'] = [Collection.from_dict(v) for v in collections] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ListCollectionsResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'collections') and self.collections is not None: + collections_list = [] + for v in self.collections: + if isinstance(v, dict): + collections_list.append(v) + else: + collections_list.append(v.to_dict()) + _dict['collections'] = collections_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ListCollectionsResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ListCollectionsResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ListCollectionsResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ListDocumentsResponse: + """ + Response object that contains an array of documents. + + :param int matching_results: (optional) The number of matching results for the + document query. + :param List[DocumentDetails] documents: (optional) An array that lists the + documents in a collection. Only the document ID of each document is returned in + the list. You can use the [Get document](#getdocument) method to get more + information about an individual document. + """ + + def __init__( + self, + *, + matching_results: Optional[int] = None, + documents: Optional[List['DocumentDetails']] = None, + ) -> None: + """ + Initialize a ListDocumentsResponse object. + + :param int matching_results: (optional) The number of matching results for + the document query. + :param List[DocumentDetails] documents: (optional) An array that lists the + documents in a collection. Only the document ID of each document is + returned in the list. You can use the [Get document](#getdocument) method + to get more information about an individual document. 
+ """ + self.matching_results = matching_results + self.documents = documents + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ListDocumentsResponse': + """Initialize a ListDocumentsResponse object from a json dictionary.""" + args = {} + if (matching_results := _dict.get('matching_results')) is not None: + args['matching_results'] = matching_results + if (documents := _dict.get('documents')) is not None: + args['documents'] = [ + DocumentDetails.from_dict(v) for v in documents + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ListDocumentsResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'matching_results') and self.matching_results is not None: + _dict['matching_results'] = self.matching_results + if hasattr(self, 'documents') and self.documents is not None: + documents_list = [] + for v in self.documents: + if isinstance(v, dict): + documents_list.append(v) + else: + documents_list.append(v.to_dict()) + _dict['documents'] = documents_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ListDocumentsResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ListDocumentsResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ListDocumentsResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ListFieldsResponse: + """ + The list of fetched fields. + The fields are returned using a fully qualified name format, however, the format + differs slightly from that used by the query operations. + * Fields which contain nested objects are assigned a type of "nested". + * Fields which belong to a nested object are prefixed with `.properties` (for + example, `warnings.properties.severity` means that the `warnings` object has a + property called `severity`). + + :param List[Field] fields: (optional) An array that contains information about + each field in the collections. + """ + + def __init__( + self, + *, + fields: Optional[List['Field']] = None, + ) -> None: + """ + Initialize a ListFieldsResponse object. + + :param List[Field] fields: (optional) An array that contains information + about each field in the collections. 
+ """ + self.fields = fields + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ListFieldsResponse': + """Initialize a ListFieldsResponse object from a json dictionary.""" + args = {} + if (fields := _dict.get('fields')) is not None: + args['fields'] = [Field.from_dict(v) for v in fields] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ListFieldsResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'fields') and self.fields is not None: + fields_list = [] + for v in self.fields: + if isinstance(v, dict): + fields_list.append(v) + else: + fields_list.append(v.to_dict()) + _dict['fields'] = fields_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ListFieldsResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ListFieldsResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ListFieldsResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ListProjectsResponse: + """ + A list of projects in this instance. + + :param List[ProjectListDetails] projects: (optional) An array of project + details. + """ + + def __init__( + self, + *, + projects: Optional[List['ProjectListDetails']] = None, + ) -> None: + """ + Initialize a ListProjectsResponse object. + + :param List[ProjectListDetails] projects: (optional) An array of project + details. + """ + self.projects = projects + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ListProjectsResponse': + """Initialize a ListProjectsResponse object from a json dictionary.""" + args = {} + if (projects := _dict.get('projects')) is not None: + args['projects'] = [ + ProjectListDetails.from_dict(v) for v in projects + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ListProjectsResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'projects') and self.projects is not None: + projects_list = [] + for v in self.projects: + if isinstance(v, dict): + projects_list.append(v) + else: + projects_list.append(v.to_dict()) + _dict['projects'] = projects_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ListProjectsResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ListProjectsResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ListProjectsResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ModelEvaluationMacroAverage: + """ + A macro-average computes metric independently for each class and then takes the + average. 
Class refers to the classification label that is specified in the + **answer_field**. + + :param float precision: A metric that measures how many of the overall documents + are classified correctly. + :param float recall: A metric that measures how often documents that should be + classified into certain classes are classified into those classes. + :param float f1: A metric that measures whether the optimal balance between + precision and recall is reached. The F1 score can be interpreted as a weighted + average of the precision and recall values. An F1 score reaches its best value + at 1 and worst value at 0. + """ + + def __init__( + self, + precision: float, + recall: float, + f1: float, + ) -> None: + """ + Initialize a ModelEvaluationMacroAverage object. + + :param float precision: A metric that measures how many of the overall + documents are classified correctly. + :param float recall: A metric that measures how often documents that should + be classified into certain classes are classified into those classes. + :param float f1: A metric that measures whether the optimal balance between + precision and recall is reached. The F1 score can be interpreted as a + weighted average of the precision and recall values. An F1 score reaches + its best value at 1 and worst value at 0. + """ + self.precision = precision + self.recall = recall + self.f1 = f1 + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ModelEvaluationMacroAverage': + """Initialize a ModelEvaluationMacroAverage object from a json dictionary.""" + args = {} + if (precision := _dict.get('precision')) is not None: + args['precision'] = precision + else: + raise ValueError( + 'Required property \'precision\' not present in ModelEvaluationMacroAverage JSON' + ) + if (recall := _dict.get('recall')) is not None: + args['recall'] = recall + else: + raise ValueError( + 'Required property \'recall\' not present in ModelEvaluationMacroAverage JSON' + ) + if (f1 := _dict.get('f1')) is not None: + args['f1'] = f1 + else: + raise ValueError( + 'Required property \'f1\' not present in ModelEvaluationMacroAverage JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ModelEvaluationMacroAverage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'precision') and self.precision is not None: + _dict['precision'] = self.precision + if hasattr(self, 'recall') and self.recall is not None: + _dict['recall'] = self.recall + if hasattr(self, 'f1') and self.f1 is not None: + _dict['f1'] = self.f1 + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ModelEvaluationMacroAverage object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ModelEvaluationMacroAverage') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ModelEvaluationMacroAverage') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ModelEvaluationMicroAverage: + """ + A micro-average aggregates the contributions of all classes to compute the average + metric. 
Classes refers to the classification labels that are specified in the + **answer_field**. + + :param float precision: A metric that measures how many of the overall documents + are classified correctly. + :param float recall: A metric that measures how often documents that should be + classified into certain classes are classified into those classes. + :param float f1: A metric that measures whether the optimal balance between + precision and recall is reached. The F1 score can be interpreted as a weighted + average of the precision and recall values. An F1 score reaches its best value + at 1 and worst value at 0. + """ + + def __init__( + self, + precision: float, + recall: float, + f1: float, + ) -> None: + """ + Initialize a ModelEvaluationMicroAverage object. + + :param float precision: A metric that measures how many of the overall + documents are classified correctly. + :param float recall: A metric that measures how often documents that should + be classified into certain classes are classified into those classes. + :param float f1: A metric that measures whether the optimal balance between + precision and recall is reached. The F1 score can be interpreted as a + weighted average of the precision and recall values. An F1 score reaches + its best value at 1 and worst value at 0. + """ + self.precision = precision + self.recall = recall + self.f1 = f1 + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ModelEvaluationMicroAverage': + """Initialize a ModelEvaluationMicroAverage object from a json dictionary.""" + args = {} + if (precision := _dict.get('precision')) is not None: + args['precision'] = precision + else: + raise ValueError( + 'Required property \'precision\' not present in ModelEvaluationMicroAverage JSON' + ) + if (recall := _dict.get('recall')) is not None: + args['recall'] = recall + else: + raise ValueError( + 'Required property \'recall\' not present in ModelEvaluationMicroAverage JSON' + ) + if (f1 := _dict.get('f1')) is not None: + args['f1'] = f1 + else: + raise ValueError( + 'Required property \'f1\' not present in ModelEvaluationMicroAverage JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ModelEvaluationMicroAverage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'precision') and self.precision is not None: + _dict['precision'] = self.precision + if hasattr(self, 'recall') and self.recall is not None: + _dict['recall'] = self.recall + if hasattr(self, 'f1') and self.f1 is not None: + _dict['f1'] = self.f1 + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ModelEvaluationMicroAverage object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ModelEvaluationMicroAverage') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ModelEvaluationMicroAverage') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Notice: + """ + A notice produced for the collection. + + :param str notice_id: (optional) Identifies the notice. Many notices might have + the same ID. 
This field exists so that user applications can programmatically + identify a notice and take automatic corrective action. Typical notice IDs + include: + `index_failed`, `index_failed_too_many_requests`, + `index_failed_incompatible_field`, `index_failed_cluster_unavailable`, + `ingestion_timeout`, `ingestion_error`, `bad_request`, `internal_error`, + `missing_model`, `unsupported_model`, + `smart_document_understanding_failed_incompatible_field`, + `smart_document_understanding_failed_internal_error`, + `smart_document_understanding_failed_internal_error`, + `smart_document_understanding_failed_warning`, + `smart_document_understanding_page_error`, + `smart_document_understanding_page_warning`. **Note:** This is not a complete + list. Other values might be returned. + :param datetime created: (optional) The creation date of the collection in the + format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. + :param str document_id: (optional) Unique identifier of the document. + :param str collection_id: (optional) Unique identifier of the collection. + :param str query_id: (optional) Unique identifier of the query used for + relevance training. + :param str severity: (optional) Severity level of the notice. + :param str step: (optional) Ingestion or training step in which the notice + occurred. + :param str description: (optional) The description of the notice. + """ + + def __init__( + self, + *, + notice_id: Optional[str] = None, + created: Optional[datetime] = None, + document_id: Optional[str] = None, + collection_id: Optional[str] = None, + query_id: Optional[str] = None, + severity: Optional[str] = None, + step: Optional[str] = None, + description: Optional[str] = None, + ) -> None: + """ + Initialize a Notice object. + + """ + self.notice_id = notice_id + self.created = created + self.document_id = document_id + self.collection_id = collection_id + self.query_id = query_id + self.severity = severity + self.step = step + self.description = description + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Notice': + """Initialize a Notice object from a json dictionary.""" + args = {} + if (notice_id := _dict.get('notice_id')) is not None: + args['notice_id'] = notice_id + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (document_id := _dict.get('document_id')) is not None: + args['document_id'] = document_id + if (collection_id := _dict.get('collection_id')) is not None: + args['collection_id'] = collection_id + if (query_id := _dict.get('query_id')) is not None: + args['query_id'] = query_id + if (severity := _dict.get('severity')) is not None: + args['severity'] = severity + if (step := _dict.get('step')) is not None: + args['step'] = step + if (description := _dict.get('description')) is not None: + args['description'] = description + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Notice object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'notice_id') and getattr(self, + 'notice_id') is not None: + _dict['notice_id'] = getattr(self, 'notice_id') + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'document_id') and getattr(self, + 'document_id') is not None: + _dict['document_id'] = getattr(self, 'document_id') + if hasattr(self, 'collection_id') and getattr( + self, 
'collection_id') is not None: + _dict['collection_id'] = getattr(self, 'collection_id') + if hasattr(self, 'query_id') and getattr(self, 'query_id') is not None: + _dict['query_id'] = getattr(self, 'query_id') + if hasattr(self, 'severity') and getattr(self, 'severity') is not None: + _dict['severity'] = getattr(self, 'severity') + if hasattr(self, 'step') and getattr(self, 'step') is not None: + _dict['step'] = getattr(self, 'step') + if hasattr(self, 'description') and getattr(self, + 'description') is not None: + _dict['description'] = getattr(self, 'description') + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Notice object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Notice') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Notice') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class SeverityEnum(str, Enum): + """ + Severity level of the notice. + """ + + WARNING = 'warning' + ERROR = 'error' + + +class PerClassModelEvaluation: + """ + An object that measures the metrics from a training run for each classification label + separately. + + :param str name: Class name. Each class name is derived from a value in the + **answer_field**. + :param float precision: A metric that measures how many of the overall documents + are classified correctly. + :param float recall: A metric that measures how often documents that should be + classified into certain classes are classified into those classes. + :param float f1: A metric that measures whether the optimal balance between + precision and recall is reached. The F1 score can be interpreted as a weighted + average of the precision and recall values. An F1 score reaches its best value + at 1 and worst value at 0. + """ + + def __init__( + self, + name: str, + precision: float, + recall: float, + f1: float, + ) -> None: + """ + Initialize a PerClassModelEvaluation object. + + :param str name: Class name. Each class name is derived from a value in the + **answer_field**. + :param float precision: A metric that measures how many of the overall + documents are classified correctly. + :param float recall: A metric that measures how often documents that should + be classified into certain classes are classified into those classes. + :param float f1: A metric that measures whether the optimal balance between + precision and recall is reached. The F1 score can be interpreted as a + weighted average of the precision and recall values. An F1 score reaches + its best value at 1 and worst value at 0. 
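+
+        As a rough illustration of how these metrics relate (assuming the
+        standard harmonic-mean definition of F1; the service computes and
+        returns these values, they are not calculated client-side):
+
+            precision, recall = 0.75, 0.60
+            f1 = 2 * precision * recall / (precision + recall)  # ~0.667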
+ """ + self.name = name + self.precision = precision + self.recall = recall + self.f1 = f1 + + @classmethod + def from_dict(cls, _dict: Dict) -> 'PerClassModelEvaluation': + """Initialize a PerClassModelEvaluation object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in PerClassModelEvaluation JSON' + ) + if (precision := _dict.get('precision')) is not None: + args['precision'] = precision + else: + raise ValueError( + 'Required property \'precision\' not present in PerClassModelEvaluation JSON' + ) + if (recall := _dict.get('recall')) is not None: + args['recall'] = recall + else: + raise ValueError( + 'Required property \'recall\' not present in PerClassModelEvaluation JSON' + ) + if (f1 := _dict.get('f1')) is not None: + args['f1'] = f1 + else: + raise ValueError( + 'Required property \'f1\' not present in PerClassModelEvaluation JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a PerClassModelEvaluation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'precision') and self.precision is not None: + _dict['precision'] = self.precision + if hasattr(self, 'recall') and self.recall is not None: + _dict['recall'] = self.recall + if hasattr(self, 'f1') and self.f1 is not None: + _dict['f1'] = self.f1 + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this PerClassModelEvaluation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'PerClassModelEvaluation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'PerClassModelEvaluation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProjectDetails: + """ + Detailed information about the specified project. + + :param str project_id: (optional) The Universally Unique Identifier (UUID) of + this project. + :param str name: (optional) The human readable name of this project. + :param str type: (optional) The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* project + and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with Premium + plan managed deployments and installed deployments only. + The Intelligent Document Processing (IDP) project type is available from IBM + Cloud-managed instances only. + :param ProjectListDetailsRelevancyTrainingStatus relevancy_training_status: + (optional) Relevancy training status information for this project. + :param int collection_count: (optional) The number of collections configured in + this project. + :param DefaultQueryParams default_query_parameters: (optional) Default query + parameters for this project. 
+ """ + + def __init__( + self, + *, + project_id: Optional[str] = None, + name: Optional[str] = None, + type: Optional[str] = None, + relevancy_training_status: Optional[ + 'ProjectListDetailsRelevancyTrainingStatus'] = None, + collection_count: Optional[int] = None, + default_query_parameters: Optional['DefaultQueryParams'] = None, + ) -> None: + """ + Initialize a ProjectDetails object. + + :param str name: (optional) The human readable name of this project. + :param str type: (optional) The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* + project and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with + Premium plan managed deployments and installed deployments only. + The Intelligent Document Processing (IDP) project type is available from + IBM Cloud-managed instances only. + :param DefaultQueryParams default_query_parameters: (optional) Default + query parameters for this project. + """ + self.project_id = project_id + self.name = name + self.type = type + self.relevancy_training_status = relevancy_training_status + self.collection_count = collection_count + self.default_query_parameters = default_query_parameters + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProjectDetails': + """Initialize a ProjectDetails object from a json dictionary.""" + args = {} + if (project_id := _dict.get('project_id')) is not None: + args['project_id'] = project_id + if (name := _dict.get('name')) is not None: + args['name'] = name + if (type := _dict.get('type')) is not None: + args['type'] = type + if (relevancy_training_status := + _dict.get('relevancy_training_status')) is not None: + args[ + 'relevancy_training_status'] = ProjectListDetailsRelevancyTrainingStatus.from_dict( + relevancy_training_status) + if (collection_count := _dict.get('collection_count')) is not None: + args['collection_count'] = collection_count + if (default_query_parameters := + _dict.get('default_query_parameters')) is not None: + args['default_query_parameters'] = DefaultQueryParams.from_dict( + default_query_parameters) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProjectDetails object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'project_id') and getattr(self, + 'project_id') is not None: + _dict['project_id'] = getattr(self, 'project_id') + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'relevancy_training_status') and getattr( + self, 'relevancy_training_status') is not None: + if isinstance(getattr(self, 'relevancy_training_status'), dict): + _dict['relevancy_training_status'] = getattr( + self, 'relevancy_training_status') + else: + _dict['relevancy_training_status'] = getattr( + self, 'relevancy_training_status').to_dict() + if hasattr(self, 'collection_count') and getattr( + self, 'collection_count') is not None: + _dict['collection_count'] = getattr(self, 'collection_count') + if hasattr(self, 'default_query_parameters' + ) and self.default_query_parameters is not None: + if isinstance(self.default_query_parameters, dict): + _dict[ + 'default_query_parameters'] = self.default_query_parameters + else: + _dict[ + 'default_query_parameters'] = self.default_query_parameters.to_dict( + ) + return _dict + 
+ def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProjectDetails object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProjectDetails') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProjectDetails') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* project + and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with Premium + plan managed deployments and installed deployments only. + The Intelligent Document Processing (IDP) project type is available from IBM + Cloud-managed instances only. + """ + + INTELLIGENT_DOCUMENT_PROCESSING = 'intelligent_document_processing' + DOCUMENT_RETRIEVAL = 'document_retrieval' + CONVERSATIONAL_SEARCH = 'conversational_search' + CONTENT_MINING = 'content_mining' + CONTENT_INTELLIGENCE = 'content_intelligence' + OTHER = 'other' + + +class ProjectListDetails: + """ + Details about a specific project. + + :param str project_id: (optional) The Universally Unique Identifier (UUID) of + this project. + :param str name: (optional) The human readable name of this project. + :param str type: (optional) The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* project + and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with Premium + plan managed deployments and installed deployments only. + The Intelligent Document Processing (IDP) project type is available from IBM + Cloud-managed instances only. + :param ProjectListDetailsRelevancyTrainingStatus relevancy_training_status: + (optional) Relevancy training status information for this project. + :param int collection_count: (optional) The number of collections configured in + this project. + """ + + def __init__( + self, + *, + project_id: Optional[str] = None, + name: Optional[str] = None, + type: Optional[str] = None, + relevancy_training_status: Optional[ + 'ProjectListDetailsRelevancyTrainingStatus'] = None, + collection_count: Optional[int] = None, + ) -> None: + """ + Initialize a ProjectListDetails object. + + :param str name: (optional) The human readable name of this project. + :param str type: (optional) The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* + project and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with + Premium plan managed deployments and installed deployments only. + The Intelligent Document Processing (IDP) project type is available from + IBM Cloud-managed instances only. 
+ """ + self.project_id = project_id + self.name = name + self.type = type + self.relevancy_training_status = relevancy_training_status + self.collection_count = collection_count + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProjectListDetails': + """Initialize a ProjectListDetails object from a json dictionary.""" + args = {} + if (project_id := _dict.get('project_id')) is not None: + args['project_id'] = project_id + if (name := _dict.get('name')) is not None: + args['name'] = name + if (type := _dict.get('type')) is not None: + args['type'] = type + if (relevancy_training_status := + _dict.get('relevancy_training_status')) is not None: + args[ + 'relevancy_training_status'] = ProjectListDetailsRelevancyTrainingStatus.from_dict( + relevancy_training_status) + if (collection_count := _dict.get('collection_count')) is not None: + args['collection_count'] = collection_count + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProjectListDetails object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'project_id') and getattr(self, + 'project_id') is not None: + _dict['project_id'] = getattr(self, 'project_id') + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'relevancy_training_status') and getattr( + self, 'relevancy_training_status') is not None: + if isinstance(getattr(self, 'relevancy_training_status'), dict): + _dict['relevancy_training_status'] = getattr( + self, 'relevancy_training_status') + else: + _dict['relevancy_training_status'] = getattr( + self, 'relevancy_training_status').to_dict() + if hasattr(self, 'collection_count') and getattr( + self, 'collection_count') is not None: + _dict['collection_count'] = getattr(self, 'collection_count') + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProjectListDetails object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProjectListDetails') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProjectListDetails') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TypeEnum(str, Enum): + """ + The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* project + and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with Premium + plan managed deployments and installed deployments only. + The Intelligent Document Processing (IDP) project type is available from IBM + Cloud-managed instances only. + """ + + INTELLIGENT_DOCUMENT_PROCESSING = 'intelligent_document_processing' + DOCUMENT_RETRIEVAL = 'document_retrieval' + CONVERSATIONAL_SEARCH = 'conversational_search' + CONTENT_MINING = 'content_mining' + CONTENT_INTELLIGENCE = 'content_intelligence' + OTHER = 'other' + + +class ProjectListDetailsRelevancyTrainingStatus: + """ + Relevancy training status information for this project. + + :param str data_updated: (optional) When the training data was updated. 
+ :param int total_examples: (optional) The total number of examples. + :param bool sufficient_label_diversity: (optional) When `true`, sufficient label + diversity is present to allow training for this project. + :param bool processing: (optional) When `true`, the relevancy training is in + processing. + :param bool minimum_examples_added: (optional) When `true`, the minimum number + of examples required to train has been met. + :param str successfully_trained: (optional) The time that the most recent + successful training occurred. + :param bool available: (optional) When `true`, relevancy training is available + when querying collections in the project. + :param int notices: (optional) The number of notices generated during the + relevancy training. + :param bool minimum_queries_added: (optional) When `true`, the minimum number of + queries required to train has been met. + """ + + def __init__( + self, + *, + data_updated: Optional[str] = None, + total_examples: Optional[int] = None, + sufficient_label_diversity: Optional[bool] = None, + processing: Optional[bool] = None, + minimum_examples_added: Optional[bool] = None, + successfully_trained: Optional[str] = None, + available: Optional[bool] = None, + notices: Optional[int] = None, + minimum_queries_added: Optional[bool] = None, + ) -> None: + """ + Initialize a ProjectListDetailsRelevancyTrainingStatus object. + + :param str data_updated: (optional) When the training data was updated. + :param int total_examples: (optional) The total number of examples. + :param bool sufficient_label_diversity: (optional) When `true`, sufficient + label diversity is present to allow training for this project. + :param bool processing: (optional) When `true`, the relevancy training is + in processing. + :param bool minimum_examples_added: (optional) When `true`, the minimum + number of examples required to train has been met. + :param str successfully_trained: (optional) The time that the most recent + successful training occurred. + :param bool available: (optional) When `true`, relevancy training is + available when querying collections in the project. + :param int notices: (optional) The number of notices generated during the + relevancy training. + :param bool minimum_queries_added: (optional) When `true`, the minimum + number of queries required to train has been met. 
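+
+        A small sketch of how an application might read this status (the
+        field values are illustrative):
+
+            status = ProjectListDetailsRelevancyTrainingStatus.from_dict({
+                'available': True,
+                'processing': False,
+                'total_examples': 50,
+            })
+            ready_for_queries = status.available and not status.processing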
+ """ + self.data_updated = data_updated + self.total_examples = total_examples + self.sufficient_label_diversity = sufficient_label_diversity + self.processing = processing + self.minimum_examples_added = minimum_examples_added + self.successfully_trained = successfully_trained + self.available = available + self.notices = notices + self.minimum_queries_added = minimum_queries_added + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'ProjectListDetailsRelevancyTrainingStatus': + """Initialize a ProjectListDetailsRelevancyTrainingStatus object from a json dictionary.""" + args = {} + if (data_updated := _dict.get('data_updated')) is not None: + args['data_updated'] = data_updated + if (total_examples := _dict.get('total_examples')) is not None: + args['total_examples'] = total_examples + if (sufficient_label_diversity := + _dict.get('sufficient_label_diversity')) is not None: + args['sufficient_label_diversity'] = sufficient_label_diversity + if (processing := _dict.get('processing')) is not None: + args['processing'] = processing + if (minimum_examples_added := + _dict.get('minimum_examples_added')) is not None: + args['minimum_examples_added'] = minimum_examples_added + if (successfully_trained := + _dict.get('successfully_trained')) is not None: + args['successfully_trained'] = successfully_trained + if (available := _dict.get('available')) is not None: + args['available'] = available + if (notices := _dict.get('notices')) is not None: + args['notices'] = notices + if (minimum_queries_added := + _dict.get('minimum_queries_added')) is not None: + args['minimum_queries_added'] = minimum_queries_added + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProjectListDetailsRelevancyTrainingStatus object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'data_updated') and self.data_updated is not None: + _dict['data_updated'] = self.data_updated + if hasattr(self, 'total_examples') and self.total_examples is not None: + _dict['total_examples'] = self.total_examples + if hasattr(self, 'sufficient_label_diversity' + ) and self.sufficient_label_diversity is not None: + _dict[ + 'sufficient_label_diversity'] = self.sufficient_label_diversity + if hasattr(self, 'processing') and self.processing is not None: + _dict['processing'] = self.processing + if hasattr(self, 'minimum_examples_added' + ) and self.minimum_examples_added is not None: + _dict['minimum_examples_added'] = self.minimum_examples_added + if hasattr(self, 'successfully_trained' + ) and self.successfully_trained is not None: + _dict['successfully_trained'] = self.successfully_trained + if hasattr(self, 'available') and self.available is not None: + _dict['available'] = self.available + if hasattr(self, 'notices') and self.notices is not None: + _dict['notices'] = self.notices + if hasattr(self, 'minimum_queries_added' + ) and self.minimum_queries_added is not None: + _dict['minimum_queries_added'] = self.minimum_queries_added + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProjectListDetailsRelevancyTrainingStatus object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'ProjectListDetailsRelevancyTrainingStatus') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not 
 isinstance(other, self.__class__):
+            return False
+        return self.__dict__ == other.__dict__
+
+    def __ne__(self,
+               other: 'ProjectListDetailsRelevancyTrainingStatus') -> bool:
+        """Return `true` when self and other are not equal, false otherwise."""
+        return not self == other
+
+
+class QueryAggregation:
+    """
+    An object that defines how to aggregate query results.
+
+    """
+
+    def __init__(self,) -> None:
+        """
+        Initialize a QueryAggregation object.
+
+        """
+        msg = "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format(
+            ", ".join([
+                'QueryAggregationQueryTermAggregation',
+                'QueryAggregationQueryGroupByAggregation',
+                'QueryAggregationQueryHistogramAggregation',
+                'QueryAggregationQueryTimesliceAggregation',
+                'QueryAggregationQueryNestedAggregation',
+                'QueryAggregationQueryFilterAggregation',
+                'QueryAggregationQueryCalculationAggregation',
+                'QueryAggregationQueryTopHitsAggregation',
+                'QueryAggregationQueryPairAggregation',
+                'QueryAggregationQueryTrendAggregation',
+                'QueryAggregationQueryTopicAggregation'
+            ]))
+        raise Exception(msg)
+
+    @classmethod
+    def from_dict(cls, _dict: Dict) -> 'QueryAggregation':
+        """Initialize a QueryAggregation object from a json dictionary."""
+        disc_class = cls._get_class_by_discriminator(_dict)
+        if disc_class != cls:
+            return disc_class.from_dict(_dict)
+        msg = "Cannot convert dictionary into an instance of base class 'QueryAggregation'. The discriminator value should map to a valid subclass: {0}".format(
+            ", ".join([
+                'QueryAggregationQueryTermAggregation',
+                'QueryAggregationQueryGroupByAggregation',
+                'QueryAggregationQueryHistogramAggregation',
+                'QueryAggregationQueryTimesliceAggregation',
+                'QueryAggregationQueryNestedAggregation',
+                'QueryAggregationQueryFilterAggregation',
+                'QueryAggregationQueryCalculationAggregation',
+                'QueryAggregationQueryTopHitsAggregation',
+                'QueryAggregationQueryPairAggregation',
+                'QueryAggregationQueryTrendAggregation',
+                'QueryAggregationQueryTopicAggregation'
+            ]))
+        raise Exception(msg)
+
+    @classmethod
+    def _from_dict(cls, _dict: Dict):
+        """Initialize a QueryAggregation object from a json dictionary."""
+        return cls.from_dict(_dict)
+
+    @classmethod
+    def _get_class_by_discriminator(cls, _dict: Dict) -> object:
+        mapping = {}
+        mapping['term'] = 'QueryAggregationQueryTermAggregation'
+        mapping['group_by'] = 'QueryAggregationQueryGroupByAggregation'
+        mapping['histogram'] = 'QueryAggregationQueryHistogramAggregation'
+        mapping['timeslice'] = 'QueryAggregationQueryTimesliceAggregation'
+        mapping['nested'] = 'QueryAggregationQueryNestedAggregation'
+        mapping['filter'] = 'QueryAggregationQueryFilterAggregation'
+        mapping['min'] = 'QueryAggregationQueryCalculationAggregation'
+        mapping['max'] = 'QueryAggregationQueryCalculationAggregation'
+        mapping['sum'] = 'QueryAggregationQueryCalculationAggregation'
+        mapping['average'] = 'QueryAggregationQueryCalculationAggregation'
+        mapping['unique_count'] = 'QueryAggregationQueryCalculationAggregation'
+        mapping['top_hits'] = 'QueryAggregationQueryTopHitsAggregation'
+        mapping['pair'] = 'QueryAggregationQueryPairAggregation'
+        mapping['trend'] = 'QueryAggregationQueryTrendAggregation'
+        mapping['topic'] = 'QueryAggregationQueryTopicAggregation'
+        disc_value = _dict.get('type')
+        if disc_value is None:
+            raise ValueError(
+                'Discriminator property \'type\' not found in QueryAggregation JSON'
+            )
+        class_name = mapping.get(disc_value, disc_value)
+        try:
+            disc_class = getattr(sys.modules[__name__], class_name)
+        except
AttributeError: + disc_class = cls + if isinstance(disc_class, object): + return disc_class + raise TypeError('%s is not a discriminator class' % class_name) + + +class QueryGroupByAggregationResult: + """ + Result group for the `group_by` aggregation. + + :param str key: The condition that is met by the documents in this group. For + example, `YEARTXT<2000`. + :param int matching_results: Number of documents that meet the query and + condition. + :param float relevancy: (optional) The relevancy for this group. Returned only + if `relevancy:true` is specified in the request. + :param int total_matching_documents: (optional) Number of documents that meet + the condition in the whole set of documents in this collection. Returned only + when `relevancy:true` is specified in the request. + :param float estimated_matching_results: (optional) The number of documents that + are estimated to match the query and condition. Returned only when + `relevancy:true` is specified in the request. + :param List[dict] aggregations: (optional) An array of subaggregations. Returned + only when this aggregation is returned as a subaggregation. + """ + + def __init__( + self, + key: str, + matching_results: int, + *, + relevancy: Optional[float] = None, + total_matching_documents: Optional[int] = None, + estimated_matching_results: Optional[float] = None, + aggregations: Optional[List[dict]] = None, + ) -> None: + """ + Initialize a QueryGroupByAggregationResult object. + + :param str key: The condition that is met by the documents in this group. + For example, `YEARTXT<2000`. + :param int matching_results: Number of documents that meet the query and + condition. + :param float relevancy: (optional) The relevancy for this group. Returned + only if `relevancy:true` is specified in the request. + :param int total_matching_documents: (optional) Number of documents that + meet the condition in the whole set of documents in this collection. + Returned only when `relevancy:true` is specified in the request. + :param float estimated_matching_results: (optional) The number of documents + that are estimated to match the query and condition. Returned only when + `relevancy:true` is specified in the request. + :param List[dict] aggregations: (optional) An array of subaggregations. + Returned only when this aggregation is returned as a subaggregation. 
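+
+        A parsing sketch that reuses the example condition from above (the
+        count is illustrative; `relevancy` and the related counts appear only
+        when `relevancy:true` was specified in the request):
+
+            group = QueryGroupByAggregationResult.from_dict({
+                'key': 'YEARTXT<2000',
+                'matching_results': 32,
+            })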
+ """ + self.key = key + self.matching_results = matching_results + self.relevancy = relevancy + self.total_matching_documents = total_matching_documents + self.estimated_matching_results = estimated_matching_results + self.aggregations = aggregations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryGroupByAggregationResult': + """Initialize a QueryGroupByAggregationResult object from a json dictionary.""" + args = {} + if (key := _dict.get('key')) is not None: + args['key'] = key + else: + raise ValueError( + 'Required property \'key\' not present in QueryGroupByAggregationResult JSON' + ) + if (matching_results := _dict.get('matching_results')) is not None: + args['matching_results'] = matching_results + else: + raise ValueError( + 'Required property \'matching_results\' not present in QueryGroupByAggregationResult JSON' + ) + if (relevancy := _dict.get('relevancy')) is not None: + args['relevancy'] = relevancy + if (total_matching_documents := + _dict.get('total_matching_documents')) is not None: + args['total_matching_documents'] = total_matching_documents + if (estimated_matching_results := + _dict.get('estimated_matching_results')) is not None: + args['estimated_matching_results'] = estimated_matching_results + if (aggregations := _dict.get('aggregations')) is not None: + args['aggregations'] = aggregations + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryGroupByAggregationResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'key') and self.key is not None: + _dict['key'] = self.key + if hasattr(self, + 'matching_results') and self.matching_results is not None: + _dict['matching_results'] = self.matching_results + if hasattr(self, 'relevancy') and self.relevancy is not None: + _dict['relevancy'] = self.relevancy + if hasattr(self, 'total_matching_documents' + ) and self.total_matching_documents is not None: + _dict['total_matching_documents'] = self.total_matching_documents + if hasattr(self, 'estimated_matching_results' + ) and self.estimated_matching_results is not None: + _dict[ + 'estimated_matching_results'] = self.estimated_matching_results + if hasattr(self, 'aggregations') and self.aggregations is not None: + _dict['aggregations'] = self.aggregations + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryGroupByAggregationResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryGroupByAggregationResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryGroupByAggregationResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryHistogramAggregationResult: + """ + Histogram numeric interval result. + + :param int key: The value of the upper bound for the numeric segment. + :param int matching_results: Number of documents with the specified key as the + upper bound. + :param List[dict] aggregations: (optional) An array of subaggregations. Returned + only when this aggregation is returned as a subaggregation. 
+ """ + + def __init__( + self, + key: int, + matching_results: int, + *, + aggregations: Optional[List[dict]] = None, + ) -> None: + """ + Initialize a QueryHistogramAggregationResult object. + + :param int key: The value of the upper bound for the numeric segment. + :param int matching_results: Number of documents with the specified key as + the upper bound. + :param List[dict] aggregations: (optional) An array of subaggregations. + Returned only when this aggregation is returned as a subaggregation. + """ + self.key = key + self.matching_results = matching_results + self.aggregations = aggregations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryHistogramAggregationResult': + """Initialize a QueryHistogramAggregationResult object from a json dictionary.""" + args = {} + if (key := _dict.get('key')) is not None: + args['key'] = key + else: + raise ValueError( + 'Required property \'key\' not present in QueryHistogramAggregationResult JSON' + ) + if (matching_results := _dict.get('matching_results')) is not None: + args['matching_results'] = matching_results + else: + raise ValueError( + 'Required property \'matching_results\' not present in QueryHistogramAggregationResult JSON' + ) + if (aggregations := _dict.get('aggregations')) is not None: + args['aggregations'] = aggregations + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryHistogramAggregationResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'key') and self.key is not None: + _dict['key'] = self.key + if hasattr(self, + 'matching_results') and self.matching_results is not None: + _dict['matching_results'] = self.matching_results + if hasattr(self, 'aggregations') and self.aggregations is not None: + _dict['aggregations'] = self.aggregations + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryHistogramAggregationResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryHistogramAggregationResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryHistogramAggregationResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryLargePassages: + """ + Configuration for passage retrieval. + + :param bool enabled: (optional) A passages query that returns the most relevant + passages from the results. + :param bool per_document: (optional) If `true`, ranks the documents by document + quality, and then returns the highest-ranked passages per document in a + `document_passages` field for each document entry in the results list of the + response. + If `false`, ranks the passages from all of the documents by passage quality + regardless of the document quality and returns them in a separate `passages` + field in the response. + :param int max_per_document: (optional) Maximum number of passages to return per + document in the result. Ignored if **passages.per_document** is `false`. + :param List[str] fields: (optional) A list of fields to extract passages from. + By default, passages are extracted from the `text` and `title` fields only. 
If + you add this parameter and specify an empty list (`[]`) as its value, then the + service searches all root-level fields for suitable passages. + :param int count: (optional) The maximum number of passages to return. Ignored + if **passages.per_document** is `true`. + :param int characters: (optional) The approximate number of characters that any + one passage will have. + :param bool find_answers: (optional) When true, `answer` objects are returned as + part of each passage in the query results. The primary difference between an + `answer` and a `passage` is that the length of a passage is defined by the + query, where the length of an `answer` is calculated by Discovery based on how + much text is needed to answer the question. + This parameter is ignored if passages are not enabled for the query, or no + **natural_language_query** is specified. + If the **find_answers** parameter is set to `true` and **per_document** + parameter is also set to `true`, then the document search results and the + passage search results within each document are reordered using the answer + confidences. The goal of this reordering is to place the best answer as the + first answer of the first passage of the first document. Similarly, if the + **find_answers** parameter is set to `true` and **per_document** parameter is + set to `false`, then the passage search results are reordered in decreasing + order of the highest confidence answer for each document and passage. + The **find_answers** parameter is available only on managed instances of + Discovery. + :param int max_answers_per_passage: (optional) The number of `answer` objects to + return per passage if the **find_answers** parmeter is specified as `true`. + """ + + def __init__( + self, + *, + enabled: Optional[bool] = None, + per_document: Optional[bool] = None, + max_per_document: Optional[int] = None, + fields: Optional[List[str]] = None, + count: Optional[int] = None, + characters: Optional[int] = None, + find_answers: Optional[bool] = None, + max_answers_per_passage: Optional[int] = None, + ) -> None: + """ + Initialize a QueryLargePassages object. + + :param bool enabled: (optional) A passages query that returns the most + relevant passages from the results. + :param bool per_document: (optional) If `true`, ranks the documents by + document quality, and then returns the highest-ranked passages per document + in a `document_passages` field for each document entry in the results list + of the response. + If `false`, ranks the passages from all of the documents by passage quality + regardless of the document quality and returns them in a separate + `passages` field in the response. + :param int max_per_document: (optional) Maximum number of passages to + return per document in the result. Ignored if **passages.per_document** is + `false`. + :param List[str] fields: (optional) A list of fields to extract passages + from. By default, passages are extracted from the `text` and `title` fields + only. If you add this parameter and specify an empty list (`[]`) as its + value, then the service searches all root-level fields for suitable + passages. + :param int count: (optional) The maximum number of passages to return. + Ignored if **passages.per_document** is `true`. + :param int characters: (optional) The approximate number of characters that + any one passage will have. + :param bool find_answers: (optional) When true, `answer` objects are + returned as part of each passage in the query results. 
The primary + difference between an `answer` and a `passage` is that the length of a + passage is defined by the query, where the length of an `answer` is + calculated by Discovery based on how much text is needed to answer the + question. + This parameter is ignored if passages are not enabled for the query, or no + **natural_language_query** is specified. + If the **find_answers** parameter is set to `true` and **per_document** + parameter is also set to `true`, then the document search results and the + passage search results within each document are reordered using the answer + confidences. The goal of this reordering is to place the best answer as the + first answer of the first passage of the first document. Similarly, if the + **find_answers** parameter is set to `true` and **per_document** parameter + is set to `false`, then the passage search results are reordered in + decreasing order of the highest confidence answer for each document and + passage. + The **find_answers** parameter is available only on managed instances of + Discovery. + :param int max_answers_per_passage: (optional) The number of `answer` + objects to return per passage if the **find_answers** parmeter is specified + as `true`. + """ + self.enabled = enabled + self.per_document = per_document + self.max_per_document = max_per_document + self.fields = fields + self.count = count + self.characters = characters + self.find_answers = find_answers + self.max_answers_per_passage = max_answers_per_passage + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryLargePassages': + """Initialize a QueryLargePassages object from a json dictionary.""" + args = {} + if (enabled := _dict.get('enabled')) is not None: + args['enabled'] = enabled + if (per_document := _dict.get('per_document')) is not None: + args['per_document'] = per_document + if (max_per_document := _dict.get('max_per_document')) is not None: + args['max_per_document'] = max_per_document + if (fields := _dict.get('fields')) is not None: + args['fields'] = fields + if (count := _dict.get('count')) is not None: + args['count'] = count + if (characters := _dict.get('characters')) is not None: + args['characters'] = characters + if (find_answers := _dict.get('find_answers')) is not None: + args['find_answers'] = find_answers + if (max_answers_per_passage := + _dict.get('max_answers_per_passage')) is not None: + args['max_answers_per_passage'] = max_answers_per_passage + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryLargePassages object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enabled') and self.enabled is not None: + _dict['enabled'] = self.enabled + if hasattr(self, 'per_document') and self.per_document is not None: + _dict['per_document'] = self.per_document + if hasattr(self, + 'max_per_document') and self.max_per_document is not None: + _dict['max_per_document'] = self.max_per_document + if hasattr(self, 'fields') and self.fields is not None: + _dict['fields'] = self.fields + if hasattr(self, 'count') and self.count is not None: + _dict['count'] = self.count + if hasattr(self, 'characters') and self.characters is not None: + _dict['characters'] = self.characters + if hasattr(self, 'find_answers') and self.find_answers is not None: + _dict['find_answers'] = self.find_answers + if hasattr(self, 'max_answers_per_passage' + ) and self.max_answers_per_passage is not None: + 
_dict['max_answers_per_passage'] = self.max_answers_per_passage + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryLargePassages object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryLargePassages') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryLargePassages') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryLargeSimilar: + """ + Finds results from documents that are similar to documents of interest. Use this + parameter to add a *More like these* function to your search. You can include this + parameter with or without a **query**, **filter** or **natural_language_query** + parameter. + + :param bool enabled: (optional) When `true`, includes documents in the query + results that are similar to documents you specify. + :param List[str] document_ids: (optional) The list of documents of interest. + Required if **enabled** is `true`. + :param List[str] fields: (optional) Looks for similarities in the specified + subset of fields in the documents. If not specified, all of the document fields + are used. + """ + + def __init__( + self, + *, + enabled: Optional[bool] = None, + document_ids: Optional[List[str]] = None, + fields: Optional[List[str]] = None, + ) -> None: + """ + Initialize a QueryLargeSimilar object. + + :param bool enabled: (optional) When `true`, includes documents in the + query results that are similar to documents you specify. + :param List[str] document_ids: (optional) The list of documents of + interest. Required if **enabled** is `true`. + :param List[str] fields: (optional) Looks for similarities in the specified + subset of fields in the documents. If not specified, all of the document + fields are used. 
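+
+        For example, to bias a query toward documents that resemble one you
+        already know about (the document ID below is a placeholder):
+
+            similar = QueryLargeSimilar(
+                enabled=True,
+                document_ids=['<document_id>'],
+            )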
+ """ + self.enabled = enabled + self.document_ids = document_ids + self.fields = fields + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryLargeSimilar': + """Initialize a QueryLargeSimilar object from a json dictionary.""" + args = {} + if (enabled := _dict.get('enabled')) is not None: + args['enabled'] = enabled + if (document_ids := _dict.get('document_ids')) is not None: + args['document_ids'] = document_ids + if (fields := _dict.get('fields')) is not None: + args['fields'] = fields + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryLargeSimilar object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enabled') and self.enabled is not None: + _dict['enabled'] = self.enabled + if hasattr(self, 'document_ids') and self.document_ids is not None: + _dict['document_ids'] = self.document_ids + if hasattr(self, 'fields') and self.fields is not None: + _dict['fields'] = self.fields + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryLargeSimilar object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryLargeSimilar') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryLargeSimilar') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryLargeSuggestedRefinements: + """ + Configuration for suggested refinements. + **Note**: The **suggested_refinements** parameter that identified dynamic facets from + the data is deprecated. + + :param bool enabled: (optional) Whether to perform suggested refinements. + :param int count: (optional) Maximum number of suggested refinements texts to be + returned. The maximum is `100`. + """ + + def __init__( + self, + *, + enabled: Optional[bool] = None, + count: Optional[int] = None, + ) -> None: + """ + Initialize a QueryLargeSuggestedRefinements object. + + :param bool enabled: (optional) Whether to perform suggested refinements. + :param int count: (optional) Maximum number of suggested refinements texts + to be returned. The maximum is `100`. 
+ """ + self.enabled = enabled + self.count = count + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryLargeSuggestedRefinements': + """Initialize a QueryLargeSuggestedRefinements object from a json dictionary.""" + args = {} + if (enabled := _dict.get('enabled')) is not None: + args['enabled'] = enabled + if (count := _dict.get('count')) is not None: + args['count'] = count + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryLargeSuggestedRefinements object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enabled') and self.enabled is not None: + _dict['enabled'] = self.enabled + if hasattr(self, 'count') and self.count is not None: + _dict['count'] = self.count + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryLargeSuggestedRefinements object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryLargeSuggestedRefinements') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryLargeSuggestedRefinements') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryLargeTableResults: + """ + Configuration for table retrieval. + + :param bool enabled: (optional) Whether to enable table retrieval. + :param int count: (optional) Maximum number of tables to return. + """ + + def __init__( + self, + *, + enabled: Optional[bool] = None, + count: Optional[int] = None, + ) -> None: + """ + Initialize a QueryLargeTableResults object. + + :param bool enabled: (optional) Whether to enable table retrieval. + :param int count: (optional) Maximum number of tables to return. 
+ """ + self.enabled = enabled + self.count = count + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryLargeTableResults': + """Initialize a QueryLargeTableResults object from a json dictionary.""" + args = {} + if (enabled := _dict.get('enabled')) is not None: + args['enabled'] = enabled + if (count := _dict.get('count')) is not None: + args['count'] = count + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryLargeTableResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'enabled') and self.enabled is not None: + _dict['enabled'] = self.enabled + if hasattr(self, 'count') and self.count is not None: + _dict['count'] = self.count + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryLargeTableResults object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryLargeTableResults') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryLargeTableResults') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryNoticesResponse: + """ + Object that contains notice query results. + + :param int matching_results: (optional) The number of matching results. + :param List[Notice] notices: (optional) Array of document results that match the + query. + """ + + def __init__( + self, + *, + matching_results: Optional[int] = None, + notices: Optional[List['Notice']] = None, + ) -> None: + """ + Initialize a QueryNoticesResponse object. + + :param int matching_results: (optional) The number of matching results. + :param List[Notice] notices: (optional) Array of document results that + match the query. 
+ """ + self.matching_results = matching_results + self.notices = notices + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryNoticesResponse': + """Initialize a QueryNoticesResponse object from a json dictionary.""" + args = {} + if (matching_results := _dict.get('matching_results')) is not None: + args['matching_results'] = matching_results + if (notices := _dict.get('notices')) is not None: + args['notices'] = [Notice.from_dict(v) for v in notices] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryNoticesResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'matching_results') and self.matching_results is not None: + _dict['matching_results'] = self.matching_results + if hasattr(self, 'notices') and self.notices is not None: + notices_list = [] + for v in self.notices: + if isinstance(v, dict): + notices_list.append(v) + else: + notices_list.append(v.to_dict()) + _dict['notices'] = notices_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryNoticesResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryNoticesResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryNoticesResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryPairAggregationResult: + """ + Result for the `pair` aggregation. + + :param List[dict] aggregations: (optional) Array of subaggregations of type + `term`, `group_by`, `histogram`, or `timeslice`. Each element of the matrix that + is returned contains a **relevancy** value that is calculated from the + combination of each value from the first and second aggregations. + """ + + def __init__( + self, + *, + aggregations: Optional[List[dict]] = None, + ) -> None: + """ + Initialize a QueryPairAggregationResult object. + + :param List[dict] aggregations: (optional) Array of subaggregations of type + `term`, `group_by`, `histogram`, or `timeslice`. Each element of the matrix + that is returned contains a **relevancy** value that is calculated from the + combination of each value from the first and second aggregations. 
+ """ + self.aggregations = aggregations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryPairAggregationResult': + """Initialize a QueryPairAggregationResult object from a json dictionary.""" + args = {} + if (aggregations := _dict.get('aggregations')) is not None: + args['aggregations'] = aggregations + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryPairAggregationResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'aggregations') and self.aggregations is not None: + _dict['aggregations'] = self.aggregations + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryPairAggregationResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryPairAggregationResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryPairAggregationResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryResponse: + """ + A response that contains the documents and aggregations for the query. + + :param int matching_results: (optional) The number of matching results for the + query. Results that match due to a curation only are not counted in the total. + :param List[QueryResult] results: (optional) Array of document results for the + query. + :param List[QueryAggregation] aggregations: (optional) Array of aggregations for + the query. + :param RetrievalDetails retrieval_details: (optional) An object contain + retrieval type information. + :param str suggested_query: (optional) Suggested correction to the submitted + **natural_language_query** value. + :param List[QuerySuggestedRefinement] suggested_refinements: (optional) + Deprecated: Array of suggested refinements. **Note**: The + `suggested_refinements` parameter that identified dynamic facets from the data + is deprecated. + :param List[QueryTableResult] table_results: (optional) Array of table results. + :param List[QueryResponsePassage] passages: (optional) Passages that best match + the query from across all of the collections in the project. Returned if + **passages.per_document** is `false`. + """ + + def __init__( + self, + *, + matching_results: Optional[int] = None, + results: Optional[List['QueryResult']] = None, + aggregations: Optional[List['QueryAggregation']] = None, + retrieval_details: Optional['RetrievalDetails'] = None, + suggested_query: Optional[str] = None, + suggested_refinements: Optional[ + List['QuerySuggestedRefinement']] = None, + table_results: Optional[List['QueryTableResult']] = None, + passages: Optional[List['QueryResponsePassage']] = None, + ) -> None: + """ + Initialize a QueryResponse object. + + :param int matching_results: (optional) The number of matching results for + the query. Results that match due to a curation only are not counted in the + total. + :param List[QueryResult] results: (optional) Array of document results for + the query. + :param List[QueryAggregation] aggregations: (optional) Array of + aggregations for the query. 
+ :param RetrievalDetails retrieval_details: (optional) An object contain + retrieval type information. + :param str suggested_query: (optional) Suggested correction to the + submitted **natural_language_query** value. + :param List[QuerySuggestedRefinement] suggested_refinements: (optional) + Deprecated: Array of suggested refinements. **Note**: The + `suggested_refinements` parameter that identified dynamic facets from the + data is deprecated. + :param List[QueryTableResult] table_results: (optional) Array of table + results. + :param List[QueryResponsePassage] passages: (optional) Passages that best + match the query from across all of the collections in the project. Returned + if **passages.per_document** is `false`. + """ + self.matching_results = matching_results + self.results = results + self.aggregations = aggregations + self.retrieval_details = retrieval_details + self.suggested_query = suggested_query + self.suggested_refinements = suggested_refinements + self.table_results = table_results + self.passages = passages + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryResponse': + """Initialize a QueryResponse object from a json dictionary.""" + args = {} + if (matching_results := _dict.get('matching_results')) is not None: + args['matching_results'] = matching_results + if (results := _dict.get('results')) is not None: + args['results'] = [QueryResult.from_dict(v) for v in results] + if (aggregations := _dict.get('aggregations')) is not None: + args['aggregations'] = [ + QueryAggregation.from_dict(v) for v in aggregations + ] + if (retrieval_details := _dict.get('retrieval_details')) is not None: + args['retrieval_details'] = RetrievalDetails.from_dict( + retrieval_details) + if (suggested_query := _dict.get('suggested_query')) is not None: + args['suggested_query'] = suggested_query + if (suggested_refinements := + _dict.get('suggested_refinements')) is not None: + args['suggested_refinements'] = [ + QuerySuggestedRefinement.from_dict(v) + for v in suggested_refinements + ] + if (table_results := _dict.get('table_results')) is not None: + args['table_results'] = [ + QueryTableResult.from_dict(v) for v in table_results + ] + if (passages := _dict.get('passages')) is not None: + args['passages'] = [ + QueryResponsePassage.from_dict(v) for v in passages + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'matching_results') and self.matching_results is not None: + _dict['matching_results'] = self.matching_results + if hasattr(self, 'results') and self.results is not None: + results_list = [] + for v in self.results: + if isinstance(v, dict): + results_list.append(v) + else: + results_list.append(v.to_dict()) + _dict['results'] = results_list + if hasattr(self, 'aggregations') and self.aggregations is not None: + aggregations_list = [] + for v in self.aggregations: + if isinstance(v, dict): + aggregations_list.append(v) + else: + aggregations_list.append(v.to_dict()) + _dict['aggregations'] = aggregations_list + if hasattr(self, + 'retrieval_details') and self.retrieval_details is not None: + if isinstance(self.retrieval_details, dict): + _dict['retrieval_details'] = self.retrieval_details + else: + _dict['retrieval_details'] = self.retrieval_details.to_dict() + if hasattr(self, + 'suggested_query') and self.suggested_query is not None: + 
_dict['suggested_query'] = self.suggested_query + if hasattr(self, 'suggested_refinements' + ) and self.suggested_refinements is not None: + suggested_refinements_list = [] + for v in self.suggested_refinements: + if isinstance(v, dict): + suggested_refinements_list.append(v) + else: + suggested_refinements_list.append(v.to_dict()) + _dict['suggested_refinements'] = suggested_refinements_list + if hasattr(self, 'table_results') and self.table_results is not None: + table_results_list = [] + for v in self.table_results: + if isinstance(v, dict): + table_results_list.append(v) + else: + table_results_list.append(v.to_dict()) + _dict['table_results'] = table_results_list + if hasattr(self, 'passages') and self.passages is not None: + passages_list = [] + for v in self.passages: + if isinstance(v, dict): + passages_list.append(v) + else: + passages_list.append(v.to_dict()) + _dict['passages'] = passages_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryResponsePassage: + """ + A passage query response. + + :param str passage_text: (optional) The content of the extracted passage. + :param float passage_score: (optional) The confidence score of the passage's + analysis. A higher score indicates greater confidence. The score is used to rank + the passages from all documents and is returned only if + **passages.per_document** is `false`. + :param str document_id: (optional) The unique identifier of the ingested + document. + :param str collection_id: (optional) The unique identifier of the collection. + :param int start_offset: (optional) The position of the first character of the + extracted passage in the originating field. + :param int end_offset: (optional) The position after the last character of the + extracted passage in the originating field. + :param str field: (optional) The label of the field from which the passage has + been extracted. + :param List[ResultPassageAnswer] answers: (optional) An array of extracted + answers to the specified query. Returned for natural language queries when + **passages.per_document** is `false`. + """ + + def __init__( + self, + *, + passage_text: Optional[str] = None, + passage_score: Optional[float] = None, + document_id: Optional[str] = None, + collection_id: Optional[str] = None, + start_offset: Optional[int] = None, + end_offset: Optional[int] = None, + field: Optional[str] = None, + answers: Optional[List['ResultPassageAnswer']] = None, + ) -> None: + """ + Initialize a QueryResponsePassage object. + + :param str passage_text: (optional) The content of the extracted passage. + :param float passage_score: (optional) The confidence score of the + passage's analysis. A higher score indicates greater confidence. The score + is used to rank the passages from all documents and is returned only if + **passages.per_document** is `false`. + :param str document_id: (optional) The unique identifier of the ingested + document. 
+ :param str collection_id: (optional) The unique identifier of the + collection. + :param int start_offset: (optional) The position of the first character of + the extracted passage in the originating field. + :param int end_offset: (optional) The position after the last character of + the extracted passage in the originating field. + :param str field: (optional) The label of the field from which the passage + has been extracted. + :param List[ResultPassageAnswer] answers: (optional) An array of extracted + answers to the specified query. Returned for natural language queries when + **passages.per_document** is `false`. + """ + self.passage_text = passage_text + self.passage_score = passage_score + self.document_id = document_id + self.collection_id = collection_id + self.start_offset = start_offset + self.end_offset = end_offset + self.field = field + self.answers = answers + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryResponsePassage': + """Initialize a QueryResponsePassage object from a json dictionary.""" + args = {} + if (passage_text := _dict.get('passage_text')) is not None: + args['passage_text'] = passage_text + if (passage_score := _dict.get('passage_score')) is not None: + args['passage_score'] = passage_score + if (document_id := _dict.get('document_id')) is not None: + args['document_id'] = document_id + if (collection_id := _dict.get('collection_id')) is not None: + args['collection_id'] = collection_id + if (start_offset := _dict.get('start_offset')) is not None: + args['start_offset'] = start_offset + if (end_offset := _dict.get('end_offset')) is not None: + args['end_offset'] = end_offset + if (field := _dict.get('field')) is not None: + args['field'] = field + if (answers := _dict.get('answers')) is not None: + args['answers'] = [ + ResultPassageAnswer.from_dict(v) for v in answers + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryResponsePassage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'passage_text') and self.passage_text is not None: + _dict['passage_text'] = self.passage_text + if hasattr(self, 'passage_score') and self.passage_score is not None: + _dict['passage_score'] = self.passage_score + if hasattr(self, 'document_id') and self.document_id is not None: + _dict['document_id'] = self.document_id + if hasattr(self, 'collection_id') and self.collection_id is not None: + _dict['collection_id'] = self.collection_id + if hasattr(self, 'start_offset') and self.start_offset is not None: + _dict['start_offset'] = self.start_offset + if hasattr(self, 'end_offset') and self.end_offset is not None: + _dict['end_offset'] = self.end_offset + if hasattr(self, 'field') and self.field is not None: + _dict['field'] = self.field + if hasattr(self, 'answers') and self.answers is not None: + answers_list = [] + for v in self.answers: + if isinstance(v, dict): + answers_list.append(v) + else: + answers_list.append(v.to_dict()) + _dict['answers'] = answers_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryResponsePassage object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryResponsePassage') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, 
self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryResponsePassage') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryResult: + """ + Result document for the specified query. + + :param str document_id: The unique identifier of the document. + :param dict metadata: (optional) Metadata of the document. + :param QueryResultMetadata result_metadata: Metadata of a query result. + :param List[QueryResultPassage] document_passages: (optional) Passages from the + document that best matches the query. Returned if **passages.per_document** is + `true`. + + This type supports additional properties of type object. The remaining key-value + pairs. + """ + + # The set of defined properties for the class + _properties = frozenset( + ['document_id', 'metadata', 'result_metadata', 'document_passages']) + + def __init__( + self, + document_id: str, + result_metadata: 'QueryResultMetadata', + *, + metadata: Optional[dict] = None, + document_passages: Optional[List['QueryResultPassage']] = None, + **kwargs: Optional[object], + ) -> None: + """ + Initialize a QueryResult object. + + :param str document_id: The unique identifier of the document. + :param QueryResultMetadata result_metadata: Metadata of a query result. + :param dict metadata: (optional) Metadata of the document. + :param List[QueryResultPassage] document_passages: (optional) Passages from + the document that best matches the query. Returned if + **passages.per_document** is `true`. + :param object **kwargs: (optional) The remaining key-value pairs. + """ + self.document_id = document_id + self.metadata = metadata + self.result_metadata = result_metadata + self.document_passages = document_passages + for k, v in kwargs.items(): + if k not in QueryResult._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. 
+ format(k)) + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryResult': + """Initialize a QueryResult object from a json dictionary.""" + args = {} + if (document_id := _dict.get('document_id')) is not None: + args['document_id'] = document_id + else: + raise ValueError( + 'Required property \'document_id\' not present in QueryResult JSON' + ) + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = metadata + if (result_metadata := _dict.get('result_metadata')) is not None: + args['result_metadata'] = QueryResultMetadata.from_dict( + result_metadata) + else: + raise ValueError( + 'Required property \'result_metadata\' not present in QueryResult JSON' + ) + if (document_passages := _dict.get('document_passages')) is not None: + args['document_passages'] = [ + QueryResultPassage.from_dict(v) for v in document_passages + ] + for k, v in _dict.items(): + if k not in cls._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + args[k] = v + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'document_id') and self.document_id is not None: + _dict['document_id'] = self.document_id + if hasattr(self, 'metadata') and self.metadata is not None: + _dict['metadata'] = self.metadata + if hasattr(self, + 'result_metadata') and self.result_metadata is not None: + if isinstance(self.result_metadata, dict): + _dict['result_metadata'] = self.result_metadata + else: + _dict['result_metadata'] = self.result_metadata.to_dict() + if hasattr(self, + 'document_passages') and self.document_passages is not None: + document_passages_list = [] + for v in self.document_passages: + if isinstance(v, dict): + document_passages_list.append(v) + else: + document_passages_list.append(v.to_dict()) + _dict['document_passages'] = document_passages_list + for k in [ + _k for _k in vars(self).keys() + if _k not in QueryResult._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def get_properties(self) -> Dict: + """Return the additional properties from this instance of QueryResult in the form of a dict.""" + _dict = {} + for k in [ + _k for _k in vars(self).keys() + if _k not in QueryResult._properties + ]: + _dict[k] = getattr(self, k) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of additional properties in this instance of QueryResult""" + for k in [ + _k for _k in vars(self).keys() + if _k not in QueryResult._properties + ]: + delattr(self, k) + for k, v in _dict.items(): + if k not in QueryResult._properties: + if not isinstance(v, object): + raise ValueError( + 'Value for additional property {} must be of type object' + .format(k)) + setattr(self, k, v) + else: + raise ValueError( + 'Property {} cannot be specified as an additional property'. 
+ format(k)) + + def __str__(self) -> str: + """Return a `str` version of this QueryResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryResultMetadata: + """ + Metadata of a query result. + + :param str document_retrieval_source: (optional) The document retrieval source + that produced this search result. + :param str collection_id: The collection id associated with this training data + set. + :param float confidence: (optional) The confidence score for the given result. + Calculated based on how relevant the result is estimated to be. The score can + range from `0.0` to `1.0`. The higher the number, the more relevant the + document. The `confidence` value for a result was calculated using the model + specified in the `document_retrieval_strategy` field of the result set. This + field is returned only if the **natural_language_query** parameter is specified + in the query. + """ + + def __init__( + self, + collection_id: str, + *, + document_retrieval_source: Optional[str] = None, + confidence: Optional[float] = None, + ) -> None: + """ + Initialize a QueryResultMetadata object. + + :param str collection_id: The collection id associated with this training + data set. + :param str document_retrieval_source: (optional) The document retrieval + source that produced this search result. + :param float confidence: (optional) The confidence score for the given + result. Calculated based on how relevant the result is estimated to be. The + score can range from `0.0` to `1.0`. The higher the number, the more + relevant the document. The `confidence` value for a result was calculated + using the model specified in the `document_retrieval_strategy` field of the + result set. This field is returned only if the **natural_language_query** + parameter is specified in the query. 
+ """ + self.document_retrieval_source = document_retrieval_source + self.collection_id = collection_id + self.confidence = confidence + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryResultMetadata': + """Initialize a QueryResultMetadata object from a json dictionary.""" + args = {} + if (document_retrieval_source := + _dict.get('document_retrieval_source')) is not None: + args['document_retrieval_source'] = document_retrieval_source + if (collection_id := _dict.get('collection_id')) is not None: + args['collection_id'] = collection_id + else: + raise ValueError( + 'Required property \'collection_id\' not present in QueryResultMetadata JSON' + ) + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryResultMetadata object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'document_retrieval_source' + ) and self.document_retrieval_source is not None: + _dict['document_retrieval_source'] = self.document_retrieval_source + if hasattr(self, 'collection_id') and self.collection_id is not None: + _dict['collection_id'] = self.collection_id + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryResultMetadata object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryResultMetadata') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryResultMetadata') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class DocumentRetrievalSourceEnum(str, Enum): + """ + The document retrieval source that produced this search result. + """ + + SEARCH = 'search' + CURATION = 'curation' + + +class QueryResultPassage: + """ + A passage query result. + + :param str passage_text: (optional) The content of the extracted passage. + :param int start_offset: (optional) The position of the first character of the + extracted passage in the originating field. + :param int end_offset: (optional) The position after the last character of the + extracted passage in the originating field. + :param str field: (optional) The label of the field from which the passage has + been extracted. + :param List[ResultPassageAnswer] answers: (optional) An arry of extracted + answers to the specified query. Returned for natural language queries when + **passages.per_document** is `true`. + """ + + def __init__( + self, + *, + passage_text: Optional[str] = None, + start_offset: Optional[int] = None, + end_offset: Optional[int] = None, + field: Optional[str] = None, + answers: Optional[List['ResultPassageAnswer']] = None, + ) -> None: + """ + Initialize a QueryResultPassage object. + + :param str passage_text: (optional) The content of the extracted passage. + :param int start_offset: (optional) The position of the first character of + the extracted passage in the originating field. 
+ :param int end_offset: (optional) The position after the last character of + the extracted passage in the originating field. + :param str field: (optional) The label of the field from which the passage + has been extracted. + :param List[ResultPassageAnswer] answers: (optional) An arry of extracted + answers to the specified query. Returned for natural language queries when + **passages.per_document** is `true`. + """ + self.passage_text = passage_text + self.start_offset = start_offset + self.end_offset = end_offset + self.field = field + self.answers = answers + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryResultPassage': + """Initialize a QueryResultPassage object from a json dictionary.""" + args = {} + if (passage_text := _dict.get('passage_text')) is not None: + args['passage_text'] = passage_text + if (start_offset := _dict.get('start_offset')) is not None: + args['start_offset'] = start_offset + if (end_offset := _dict.get('end_offset')) is not None: + args['end_offset'] = end_offset + if (field := _dict.get('field')) is not None: + args['field'] = field + if (answers := _dict.get('answers')) is not None: + args['answers'] = [ + ResultPassageAnswer.from_dict(v) for v in answers + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryResultPassage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'passage_text') and self.passage_text is not None: + _dict['passage_text'] = self.passage_text + if hasattr(self, 'start_offset') and self.start_offset is not None: + _dict['start_offset'] = self.start_offset + if hasattr(self, 'end_offset') and self.end_offset is not None: + _dict['end_offset'] = self.end_offset + if hasattr(self, 'field') and self.field is not None: + _dict['field'] = self.field + if hasattr(self, 'answers') and self.answers is not None: + answers_list = [] + for v in self.answers: + if isinstance(v, dict): + answers_list.append(v) + else: + answers_list.append(v.to_dict()) + _dict['answers'] = answers_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryResultPassage object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryResultPassage') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryResultPassage') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QuerySuggestedRefinement: + """ + A suggested additional query term or terms user to filter results. **Note**: The + `suggested_refinements` parameter is deprecated. + + :param str text: (optional) The text used to filter. + """ + + def __init__( + self, + *, + text: Optional[str] = None, + ) -> None: + """ + Initialize a QuerySuggestedRefinement object. + + :param str text: (optional) The text used to filter. 
+ """ + self.text = text + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QuerySuggestedRefinement': + """Initialize a QuerySuggestedRefinement object from a json dictionary.""" + args = {} + if (text := _dict.get('text')) is not None: + args['text'] = text + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QuerySuggestedRefinement object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QuerySuggestedRefinement object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QuerySuggestedRefinement') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QuerySuggestedRefinement') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryTableResult: + """ + A tables whose content or context match a search query. + + :param str table_id: (optional) The identifier for the retrieved table. + :param str source_document_id: (optional) The identifier of the document the + table was retrieved from. + :param str collection_id: (optional) The identifier of the collection the table + was retrieved from. + :param str table_html: (optional) HTML snippet of the table info. + :param int table_html_offset: (optional) The offset of the table html snippet in + the original document html. + :param TableResultTable table: (optional) Full table object retrieved from Table + Understanding Enrichment. + """ + + def __init__( + self, + *, + table_id: Optional[str] = None, + source_document_id: Optional[str] = None, + collection_id: Optional[str] = None, + table_html: Optional[str] = None, + table_html_offset: Optional[int] = None, + table: Optional['TableResultTable'] = None, + ) -> None: + """ + Initialize a QueryTableResult object. + + :param str table_id: (optional) The identifier for the retrieved table. + :param str source_document_id: (optional) The identifier of the document + the table was retrieved from. + :param str collection_id: (optional) The identifier of the collection the + table was retrieved from. + :param str table_html: (optional) HTML snippet of the table info. + :param int table_html_offset: (optional) The offset of the table html + snippet in the original document html. + :param TableResultTable table: (optional) Full table object retrieved from + Table Understanding Enrichment. 
+ """ + self.table_id = table_id + self.source_document_id = source_document_id + self.collection_id = collection_id + self.table_html = table_html + self.table_html_offset = table_html_offset + self.table = table + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryTableResult': + """Initialize a QueryTableResult object from a json dictionary.""" + args = {} + if (table_id := _dict.get('table_id')) is not None: + args['table_id'] = table_id + if (source_document_id := _dict.get('source_document_id')) is not None: + args['source_document_id'] = source_document_id + if (collection_id := _dict.get('collection_id')) is not None: + args['collection_id'] = collection_id + if (table_html := _dict.get('table_html')) is not None: + args['table_html'] = table_html + if (table_html_offset := _dict.get('table_html_offset')) is not None: + args['table_html_offset'] = table_html_offset + if (table := _dict.get('table')) is not None: + args['table'] = TableResultTable.from_dict(table) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryTableResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'table_id') and self.table_id is not None: + _dict['table_id'] = self.table_id + if hasattr( + self, + 'source_document_id') and self.source_document_id is not None: + _dict['source_document_id'] = self.source_document_id + if hasattr(self, 'collection_id') and self.collection_id is not None: + _dict['collection_id'] = self.collection_id + if hasattr(self, 'table_html') and self.table_html is not None: + _dict['table_html'] = self.table_html + if hasattr(self, + 'table_html_offset') and self.table_html_offset is not None: + _dict['table_html_offset'] = self.table_html_offset + if hasattr(self, 'table') and self.table is not None: + if isinstance(self.table, dict): + _dict['table'] = self.table + else: + _dict['table'] = self.table.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryTableResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryTableResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryTableResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryTermAggregationResult: + """ + Top value result for the `term` aggregation. + + :param str key: Value of the field with a nonzero frequency in the document set. + :param int matching_results: Number of documents that contain the 'key'. + :param float relevancy: (optional) The relevancy score for this result. Returned + only if `relevancy:true` is specified in the request. + :param int total_matching_documents: (optional) Number of documents in the + collection that contain the term in the specified field. Returned only when + `relevancy:true` is specified in the request. + :param float estimated_matching_results: (optional) Number of documents that are + estimated to match the query and also meet the condition. Returned only when + `relevancy:true` is specified in the request. 
+ :param List[dict] aggregations: (optional) An array of subaggregations. Returned + only when this aggregation is combined with other aggregations in the request or + is returned as a subaggregation. + """ + + def __init__( + self, + key: str, + matching_results: int, + *, + relevancy: Optional[float] = None, + total_matching_documents: Optional[int] = None, + estimated_matching_results: Optional[float] = None, + aggregations: Optional[List[dict]] = None, + ) -> None: + """ + Initialize a QueryTermAggregationResult object. + + :param str key: Value of the field with a nonzero frequency in the document + set. + :param int matching_results: Number of documents that contain the 'key'. + :param float relevancy: (optional) The relevancy score for this result. + Returned only if `relevancy:true` is specified in the request. + :param int total_matching_documents: (optional) Number of documents in the + collection that contain the term in the specified field. Returned only when + `relevancy:true` is specified in the request. + :param float estimated_matching_results: (optional) Number of documents + that are estimated to match the query and also meet the condition. Returned + only when `relevancy:true` is specified in the request. + :param List[dict] aggregations: (optional) An array of subaggregations. + Returned only when this aggregation is combined with other aggregations in + the request or is returned as a subaggregation. + """ + self.key = key + self.matching_results = matching_results + self.relevancy = relevancy + self.total_matching_documents = total_matching_documents + self.estimated_matching_results = estimated_matching_results + self.aggregations = aggregations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryTermAggregationResult': + """Initialize a QueryTermAggregationResult object from a json dictionary.""" + args = {} + if (key := _dict.get('key')) is not None: + args['key'] = key + else: + raise ValueError( + 'Required property \'key\' not present in QueryTermAggregationResult JSON' + ) + if (matching_results := _dict.get('matching_results')) is not None: + args['matching_results'] = matching_results + else: + raise ValueError( + 'Required property \'matching_results\' not present in QueryTermAggregationResult JSON' + ) + if (relevancy := _dict.get('relevancy')) is not None: + args['relevancy'] = relevancy + if (total_matching_documents := + _dict.get('total_matching_documents')) is not None: + args['total_matching_documents'] = total_matching_documents + if (estimated_matching_results := + _dict.get('estimated_matching_results')) is not None: + args['estimated_matching_results'] = estimated_matching_results + if (aggregations := _dict.get('aggregations')) is not None: + args['aggregations'] = aggregations + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryTermAggregationResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'key') and self.key is not None: + _dict['key'] = self.key + if hasattr(self, + 'matching_results') and self.matching_results is not None: + _dict['matching_results'] = self.matching_results + if hasattr(self, 'relevancy') and self.relevancy is not None: + _dict['relevancy'] = self.relevancy + if hasattr(self, 'total_matching_documents' + ) and self.total_matching_documents is not None: + _dict['total_matching_documents'] = self.total_matching_documents + if hasattr(self, 
'estimated_matching_results' + ) and self.estimated_matching_results is not None: + _dict[ + 'estimated_matching_results'] = self.estimated_matching_results + if hasattr(self, 'aggregations') and self.aggregations is not None: + _dict['aggregations'] = self.aggregations + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryTermAggregationResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryTermAggregationResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryTermAggregationResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryTimesliceAggregationResult: + """ + A timeslice interval segment. + + :param str key_as_string: String date value of the upper bound for the timeslice + interval in ISO-8601 format. + :param int key: Numeric date value of the upper bound for the timeslice interval + in UNIX milliseconds since epoch. + :param int matching_results: Number of documents with the specified key as the + upper bound. + :param List[dict] aggregations: (optional) An array of subaggregations. Returned + only when this aggregation is returned as a subaggregation. + """ + + def __init__( + self, + key_as_string: str, + key: int, + matching_results: int, + *, + aggregations: Optional[List[dict]] = None, + ) -> None: + """ + Initialize a QueryTimesliceAggregationResult object. + + :param str key_as_string: String date value of the upper bound for the + timeslice interval in ISO-8601 format. + :param int key: Numeric date value of the upper bound for the timeslice + interval in UNIX milliseconds since epoch. + :param int matching_results: Number of documents with the specified key as + the upper bound. + :param List[dict] aggregations: (optional) An array of subaggregations. + Returned only when this aggregation is returned as a subaggregation. 
+ """ + self.key_as_string = key_as_string + self.key = key + self.matching_results = matching_results + self.aggregations = aggregations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryTimesliceAggregationResult': + """Initialize a QueryTimesliceAggregationResult object from a json dictionary.""" + args = {} + if (key_as_string := _dict.get('key_as_string')) is not None: + args['key_as_string'] = key_as_string + else: + raise ValueError( + 'Required property \'key_as_string\' not present in QueryTimesliceAggregationResult JSON' + ) + if (key := _dict.get('key')) is not None: + args['key'] = key + else: + raise ValueError( + 'Required property \'key\' not present in QueryTimesliceAggregationResult JSON' + ) + if (matching_results := _dict.get('matching_results')) is not None: + args['matching_results'] = matching_results + else: + raise ValueError( + 'Required property \'matching_results\' not present in QueryTimesliceAggregationResult JSON' + ) + if (aggregations := _dict.get('aggregations')) is not None: + args['aggregations'] = aggregations + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryTimesliceAggregationResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'key_as_string') and self.key_as_string is not None: + _dict['key_as_string'] = self.key_as_string + if hasattr(self, 'key') and self.key is not None: + _dict['key'] = self.key + if hasattr(self, + 'matching_results') and self.matching_results is not None: + _dict['matching_results'] = self.matching_results + if hasattr(self, 'aggregations') and self.aggregations is not None: + _dict['aggregations'] = self.aggregations + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryTimesliceAggregationResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryTimesliceAggregationResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryTimesliceAggregationResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryTopHitsAggregationResult: + """ + A query response that contains the matching documents for the preceding aggregations. + + :param int matching_results: Number of matching results. + :param List[dict] hits: (optional) An array of the document results in an + ordered list. + """ + + def __init__( + self, + matching_results: int, + *, + hits: Optional[List[dict]] = None, + ) -> None: + """ + Initialize a QueryTopHitsAggregationResult object. + + :param int matching_results: Number of matching results. + :param List[dict] hits: (optional) An array of the document results in an + ordered list. 
+ """ + self.matching_results = matching_results + self.hits = hits + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryTopHitsAggregationResult': + """Initialize a QueryTopHitsAggregationResult object from a json dictionary.""" + args = {} + if (matching_results := _dict.get('matching_results')) is not None: + args['matching_results'] = matching_results + else: + raise ValueError( + 'Required property \'matching_results\' not present in QueryTopHitsAggregationResult JSON' + ) + if (hits := _dict.get('hits')) is not None: + args['hits'] = hits + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryTopHitsAggregationResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'matching_results') and self.matching_results is not None: + _dict['matching_results'] = self.matching_results + if hasattr(self, 'hits') and self.hits is not None: + _dict['hits'] = self.hits + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryTopHitsAggregationResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryTopHitsAggregationResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryTopHitsAggregationResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryTopicAggregationResult: + """ + Result for the `topic` aggregation. + + :param List[dict] aggregations: (optional) Array of subaggregations of type + `term` or `group_by` and `timeslice`. Each element of the matrix that is + returned contains a **topic_indicator** that is calculated from the combination + of each aggregation value and segment of time. + """ + + def __init__( + self, + *, + aggregations: Optional[List[dict]] = None, + ) -> None: + """ + Initialize a QueryTopicAggregationResult object. + + :param List[dict] aggregations: (optional) Array of subaggregations of + type `term` or `group_by` and `timeslice`. Each element of the matrix that + is returned contains a **topic_indicator** that is calculated from the + combination of each aggregation value and segment of time. 
+ """ + self.aggregations = aggregations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryTopicAggregationResult': + """Initialize a QueryTopicAggregationResult object from a json dictionary.""" + args = {} + if (aggregations := _dict.get('aggregations')) is not None: + args['aggregations'] = aggregations + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryTopicAggregationResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'aggregations') and self.aggregations is not None: + _dict['aggregations'] = self.aggregations + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryTopicAggregationResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryTopicAggregationResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryTopicAggregationResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryTrendAggregationResult: + """ + Result for the `trend` aggregation. + + :param List[dict] aggregations: (optional) Array of subaggregations of type + `term` or `group_by` and `timeslice`. Each element of the matrix that is + returned contains a **trend_indicator** that is calculated from the combination + of each aggregation value and segment of time. + """ + + def __init__( + self, + *, + aggregations: Optional[List[dict]] = None, + ) -> None: + """ + Initialize a QueryTrendAggregationResult object. + + :param List[dict] aggregations: (optional) Array of subaggregations of type + `term` or `group_by` and `timeslice`. Each element of the matrix that is + returned contains a **trend_indicator** that is calculated from the + combination of each aggregation value and segment of time. 
+ """ + self.aggregations = aggregations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryTrendAggregationResult': + """Initialize a QueryTrendAggregationResult object from a json dictionary.""" + args = {} + if (aggregations := _dict.get('aggregations')) is not None: + args['aggregations'] = aggregations + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryTrendAggregationResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'aggregations') and self.aggregations is not None: + _dict['aggregations'] = self.aggregations + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryTrendAggregationResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryTrendAggregationResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryTrendAggregationResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ResultPassageAnswer: + """ + Object that contains a potential answer to the specified query. + + :param str answer_text: (optional) Answer text for the specified query as + identified by Discovery. + :param int start_offset: (optional) The position of the first character of the + extracted answer in the originating field. + :param int end_offset: (optional) The position after the last character of the + extracted answer in the originating field. + :param float confidence: (optional) An estimate of the probability that the + answer is relevant. + """ + + def __init__( + self, + *, + answer_text: Optional[str] = None, + start_offset: Optional[int] = None, + end_offset: Optional[int] = None, + confidence: Optional[float] = None, + ) -> None: + """ + Initialize a ResultPassageAnswer object. + + :param str answer_text: (optional) Answer text for the specified query as + identified by Discovery. + :param int start_offset: (optional) The position of the first character of + the extracted answer in the originating field. + :param int end_offset: (optional) The position after the last character of + the extracted answer in the originating field. + :param float confidence: (optional) An estimate of the probability that the + answer is relevant. 
+ """ + self.answer_text = answer_text + self.start_offset = start_offset + self.end_offset = end_offset + self.confidence = confidence + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ResultPassageAnswer': + """Initialize a ResultPassageAnswer object from a json dictionary.""" + args = {} + if (answer_text := _dict.get('answer_text')) is not None: + args['answer_text'] = answer_text + if (start_offset := _dict.get('start_offset')) is not None: + args['start_offset'] = start_offset + if (end_offset := _dict.get('end_offset')) is not None: + args['end_offset'] = end_offset + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ResultPassageAnswer object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'answer_text') and self.answer_text is not None: + _dict['answer_text'] = self.answer_text + if hasattr(self, 'start_offset') and self.start_offset is not None: + _dict['start_offset'] = self.start_offset + if hasattr(self, 'end_offset') and self.end_offset is not None: + _dict['end_offset'] = self.end_offset + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ResultPassageAnswer object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ResultPassageAnswer') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ResultPassageAnswer') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class RetrievalDetails: + """ + An object contain retrieval type information. + + :param str document_retrieval_strategy: (optional) Identifies the document + retrieval strategy used for this query. `relevancy_training` indicates that the + results were returned using a relevancy trained model. + **Note**: In the event of trained collections being queried, but the trained + model is not used to return results, the **document_retrieval_strategy** is + listed as `untrained`. + """ + + def __init__( + self, + *, + document_retrieval_strategy: Optional[str] = None, + ) -> None: + """ + Initialize a RetrievalDetails object. + + :param str document_retrieval_strategy: (optional) Identifies the document + retrieval strategy used for this query. `relevancy_training` indicates that + the results were returned using a relevancy trained model. + **Note**: In the event of trained collections being queried, but the + trained model is not used to return results, the + **document_retrieval_strategy** is listed as `untrained`. 
+ """ + self.document_retrieval_strategy = document_retrieval_strategy + + @classmethod + def from_dict(cls, _dict: Dict) -> 'RetrievalDetails': + """Initialize a RetrievalDetails object from a json dictionary.""" + args = {} + if (document_retrieval_strategy := + _dict.get('document_retrieval_strategy')) is not None: + args['document_retrieval_strategy'] = document_retrieval_strategy + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RetrievalDetails object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'document_retrieval_strategy' + ) and self.document_retrieval_strategy is not None: + _dict[ + 'document_retrieval_strategy'] = self.document_retrieval_strategy + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this RetrievalDetails object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'RetrievalDetails') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'RetrievalDetails') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class DocumentRetrievalStrategyEnum(str, Enum): + """ + Identifies the document retrieval strategy used for this query. + `relevancy_training` indicates that the results were returned using a relevancy + trained model. + **Note**: In the event of trained collections being queried, but the trained model + is not used to return results, the **document_retrieval_strategy** is listed as + `untrained`. + """ + + UNTRAINED = 'untrained' + RELEVANCY_TRAINING = 'relevancy_training' + + +class StopWordList: + """ + List of words to filter out of text that is submitted in queries. + + :param List[str] stopwords: List of stop words. + """ + + def __init__( + self, + stopwords: List[str], + ) -> None: + """ + Initialize a StopWordList object. + + :param List[str] stopwords: List of stop words. 
+ """ + self.stopwords = stopwords + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StopWordList': + """Initialize a StopWordList object from a json dictionary.""" + args = {} + if (stopwords := _dict.get('stopwords')) is not None: + args['stopwords'] = stopwords + else: + raise ValueError( + 'Required property \'stopwords\' not present in StopWordList JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StopWordList object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'stopwords') and self.stopwords is not None: + _dict['stopwords'] = self.stopwords + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StopWordList object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StopWordList') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StopWordList') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TableBodyCells: + """ + Cells that are not table header, column header, or row header cells. + + :param str cell_id: (optional) The unique ID of the cell in the current table. + :param TableElementLocation location: (optional) The numeric location of the + identified element in the document, represented with two integers labeled + `begin` and `end`. + :param str text: (optional) The textual contents of this cell from the input + document without associated markup content. + :param int row_index_begin: (optional) The `begin` index of this cell's `row` + location in the current table. + :param int row_index_end: (optional) The `end` index of this cell's `row` + location in the current table. + :param int column_index_begin: (optional) The `begin` index of this cell's + `column` location in the current table. + :param int column_index_end: (optional) The `end` index of this cell's `column` + location in the current table. + :param List[str] row_header_ids: (optional) A list of ID values that represent + the table row headers that are associated with this body cell. + :param List[str] row_header_texts: (optional) A list of row header values that + are associated with this body cell. + :param List[str] row_header_texts_normalized: (optional) A list of normalized + row header values that are associated with this body cell. + :param List[str] column_header_ids: (optional) A list of ID values that + represent the column headers that are associated with this body cell. + :param List[str] column_header_texts: (optional) A list of column header values + that are associated with this body cell. + :param List[str] column_header_texts_normalized: (optional) A list of normalized + column header values that are associated with this body cell. + :param List[DocumentAttribute] attributes: (optional) A list of document + attributes. 
+ """ + + def __init__( + self, + *, + cell_id: Optional[str] = None, + location: Optional['TableElementLocation'] = None, + text: Optional[str] = None, + row_index_begin: Optional[int] = None, + row_index_end: Optional[int] = None, + column_index_begin: Optional[int] = None, + column_index_end: Optional[int] = None, + row_header_ids: Optional[List[str]] = None, + row_header_texts: Optional[List[str]] = None, + row_header_texts_normalized: Optional[List[str]] = None, + column_header_ids: Optional[List[str]] = None, + column_header_texts: Optional[List[str]] = None, + column_header_texts_normalized: Optional[List[str]] = None, + attributes: Optional[List['DocumentAttribute']] = None, + ) -> None: + """ + Initialize a TableBodyCells object. + + :param str cell_id: (optional) The unique ID of the cell in the current + table. + :param TableElementLocation location: (optional) The numeric location of + the identified element in the document, represented with two integers + labeled `begin` and `end`. + :param str text: (optional) The textual contents of this cell from the + input document without associated markup content. + :param int row_index_begin: (optional) The `begin` index of this cell's + `row` location in the current table. + :param int row_index_end: (optional) The `end` index of this cell's `row` + location in the current table. + :param int column_index_begin: (optional) The `begin` index of this cell's + `column` location in the current table. + :param int column_index_end: (optional) The `end` index of this cell's + `column` location in the current table. + :param List[str] row_header_ids: (optional) A list of ID values that + represent the table row headers that are associated with this body cell. + :param List[str] row_header_texts: (optional) A list of row header values + that are associated with this body cell. + :param List[str] row_header_texts_normalized: (optional) A list of + normalized row header values that are associated with this body cell. + :param List[str] column_header_ids: (optional) A list of ID values that + represent the column headers that are associated with this body cell. + :param List[str] column_header_texts: (optional) A list of column header + values that are associated with this body cell. + :param List[str] column_header_texts_normalized: (optional) A list of + normalized column header values that are associated with this body cell. + :param List[DocumentAttribute] attributes: (optional) A list of document + attributes. 
+ """ + self.cell_id = cell_id + self.location = location + self.text = text + self.row_index_begin = row_index_begin + self.row_index_end = row_index_end + self.column_index_begin = column_index_begin + self.column_index_end = column_index_end + self.row_header_ids = row_header_ids + self.row_header_texts = row_header_texts + self.row_header_texts_normalized = row_header_texts_normalized + self.column_header_ids = column_header_ids + self.column_header_texts = column_header_texts + self.column_header_texts_normalized = column_header_texts_normalized + self.attributes = attributes + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TableBodyCells': + """Initialize a TableBodyCells object from a json dictionary.""" + args = {} + if (cell_id := _dict.get('cell_id')) is not None: + args['cell_id'] = cell_id + if (location := _dict.get('location')) is not None: + args['location'] = TableElementLocation.from_dict(location) + if (text := _dict.get('text')) is not None: + args['text'] = text + if (row_index_begin := _dict.get('row_index_begin')) is not None: + args['row_index_begin'] = row_index_begin + if (row_index_end := _dict.get('row_index_end')) is not None: + args['row_index_end'] = row_index_end + if (column_index_begin := _dict.get('column_index_begin')) is not None: + args['column_index_begin'] = column_index_begin + if (column_index_end := _dict.get('column_index_end')) is not None: + args['column_index_end'] = column_index_end + if (row_header_ids := _dict.get('row_header_ids')) is not None: + args['row_header_ids'] = row_header_ids + if (row_header_texts := _dict.get('row_header_texts')) is not None: + args['row_header_texts'] = row_header_texts + if (row_header_texts_normalized := + _dict.get('row_header_texts_normalized')) is not None: + args['row_header_texts_normalized'] = row_header_texts_normalized + if (column_header_ids := _dict.get('column_header_ids')) is not None: + args['column_header_ids'] = column_header_ids + if (column_header_texts := + _dict.get('column_header_texts')) is not None: + args['column_header_texts'] = column_header_texts + if (column_header_texts_normalized := + _dict.get('column_header_texts_normalized')) is not None: + args[ + 'column_header_texts_normalized'] = column_header_texts_normalized + if (attributes := _dict.get('attributes')) is not None: + args['attributes'] = [ + DocumentAttribute.from_dict(v) for v in attributes + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TableBodyCells object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'cell_id') and self.cell_id is not None: + _dict['cell_id'] = self.cell_id + if hasattr(self, 'location') and self.location is not None: + if isinstance(self.location, dict): + _dict['location'] = self.location + else: + _dict['location'] = self.location.to_dict() + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, + 'row_index_begin') and self.row_index_begin is not None: + _dict['row_index_begin'] = self.row_index_begin + if hasattr(self, 'row_index_end') and self.row_index_end is not None: + _dict['row_index_end'] = self.row_index_end + if hasattr( + self, + 'column_index_begin') and self.column_index_begin is not None: + _dict['column_index_begin'] = self.column_index_begin + if hasattr(self, + 'column_index_end') and self.column_index_end is not None: + _dict['column_index_end'] = 
self.column_index_end + if hasattr(self, 'row_header_ids') and self.row_header_ids is not None: + _dict['row_header_ids'] = self.row_header_ids + if hasattr(self, + 'row_header_texts') and self.row_header_texts is not None: + _dict['row_header_texts'] = self.row_header_texts + if hasattr(self, 'row_header_texts_normalized' + ) and self.row_header_texts_normalized is not None: + _dict[ + 'row_header_texts_normalized'] = self.row_header_texts_normalized + if hasattr(self, + 'column_header_ids') and self.column_header_ids is not None: + _dict['column_header_ids'] = self.column_header_ids + if hasattr( + self, + 'column_header_texts') and self.column_header_texts is not None: + _dict['column_header_texts'] = self.column_header_texts + if hasattr(self, 'column_header_texts_normalized' + ) and self.column_header_texts_normalized is not None: + _dict[ + 'column_header_texts_normalized'] = self.column_header_texts_normalized + if hasattr(self, 'attributes') and self.attributes is not None: + attributes_list = [] + for v in self.attributes: + if isinstance(v, dict): + attributes_list.append(v) + else: + attributes_list.append(v.to_dict()) + _dict['attributes'] = attributes_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TableBodyCells object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TableBodyCells') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TableBodyCells') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TableCellKey: + """ + A key in a key-value pair. + + :param str cell_id: (optional) The unique ID of the key in the table. + :param TableElementLocation location: (optional) The numeric location of the + identified element in the document, represented with two integers labeled + `begin` and `end`. + :param str text: (optional) The text content of the table cell without HTML + markup. + """ + + def __init__( + self, + *, + cell_id: Optional[str] = None, + location: Optional['TableElementLocation'] = None, + text: Optional[str] = None, + ) -> None: + """ + Initialize a TableCellKey object. + + :param str cell_id: (optional) The unique ID of the key in the table. + :param TableElementLocation location: (optional) The numeric location of + the identified element in the document, represented with two integers + labeled `begin` and `end`. + :param str text: (optional) The text content of the table cell without HTML + markup. 
+ """ + self.cell_id = cell_id + self.location = location + self.text = text + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TableCellKey': + """Initialize a TableCellKey object from a json dictionary.""" + args = {} + if (cell_id := _dict.get('cell_id')) is not None: + args['cell_id'] = cell_id + if (location := _dict.get('location')) is not None: + args['location'] = TableElementLocation.from_dict(location) + if (text := _dict.get('text')) is not None: + args['text'] = text + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TableCellKey object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'cell_id') and self.cell_id is not None: + _dict['cell_id'] = self.cell_id + if hasattr(self, 'location') and self.location is not None: + if isinstance(self.location, dict): + _dict['location'] = self.location + else: + _dict['location'] = self.location.to_dict() + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TableCellKey object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TableCellKey') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TableCellKey') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TableCellValues: + """ + A value in a key-value pair. + + :param str cell_id: (optional) The unique ID of the value in the table. + :param TableElementLocation location: (optional) The numeric location of the + identified element in the document, represented with two integers labeled + `begin` and `end`. + :param str text: (optional) The text content of the table cell without HTML + markup. + """ + + def __init__( + self, + *, + cell_id: Optional[str] = None, + location: Optional['TableElementLocation'] = None, + text: Optional[str] = None, + ) -> None: + """ + Initialize a TableCellValues object. + + :param str cell_id: (optional) The unique ID of the value in the table. + :param TableElementLocation location: (optional) The numeric location of + the identified element in the document, represented with two integers + labeled `begin` and `end`. + :param str text: (optional) The text content of the table cell without HTML + markup. 
+ """ + self.cell_id = cell_id + self.location = location + self.text = text + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TableCellValues': + """Initialize a TableCellValues object from a json dictionary.""" + args = {} + if (cell_id := _dict.get('cell_id')) is not None: + args['cell_id'] = cell_id + if (location := _dict.get('location')) is not None: + args['location'] = TableElementLocation.from_dict(location) + if (text := _dict.get('text')) is not None: + args['text'] = text + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TableCellValues object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'cell_id') and self.cell_id is not None: + _dict['cell_id'] = self.cell_id + if hasattr(self, 'location') and self.location is not None: + if isinstance(self.location, dict): + _dict['location'] = self.location + else: + _dict['location'] = self.location.to_dict() + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TableCellValues object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TableCellValues') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TableCellValues') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TableColumnHeaders: + """ + Column-level cells, each applicable as a header to other cells in the same column as + itself, of the current table. + + :param str cell_id: (optional) The unique ID of the cell in the current table. + :param TableElementLocation location: (optional) The numeric location of the + identified element in the document, represented with two integers labeled + `begin` and `end`. + :param str text: (optional) The textual contents of this cell from the input + document without associated markup content. + :param str text_normalized: (optional) Normalized column header text. + :param int row_index_begin: (optional) The `begin` index of this cell's `row` + location in the current table. + :param int row_index_end: (optional) The `end` index of this cell's `row` + location in the current table. + :param int column_index_begin: (optional) The `begin` index of this cell's + `column` location in the current table. + :param int column_index_end: (optional) The `end` index of this cell's `column` + location in the current table. + """ + + def __init__( + self, + *, + cell_id: Optional[str] = None, + location: Optional['TableElementLocation'] = None, + text: Optional[str] = None, + text_normalized: Optional[str] = None, + row_index_begin: Optional[int] = None, + row_index_end: Optional[int] = None, + column_index_begin: Optional[int] = None, + column_index_end: Optional[int] = None, + ) -> None: + """ + Initialize a TableColumnHeaders object. + + :param str cell_id: (optional) The unique ID of the cell in the current + table. + :param TableElementLocation location: (optional) The numeric location of + the identified element in the document, represented with two integers + labeled `begin` and `end`. 
+ :param str text: (optional) The textual contents of this cell from the + input document without associated markup content. + :param str text_normalized: (optional) Normalized column header text. + :param int row_index_begin: (optional) The `begin` index of this cell's + `row` location in the current table. + :param int row_index_end: (optional) The `end` index of this cell's `row` + location in the current table. + :param int column_index_begin: (optional) The `begin` index of this cell's + `column` location in the current table. + :param int column_index_end: (optional) The `end` index of this cell's + `column` location in the current table. + """ + self.cell_id = cell_id + self.location = location + self.text = text + self.text_normalized = text_normalized + self.row_index_begin = row_index_begin + self.row_index_end = row_index_end + self.column_index_begin = column_index_begin + self.column_index_end = column_index_end + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TableColumnHeaders': + """Initialize a TableColumnHeaders object from a json dictionary.""" + args = {} + if (cell_id := _dict.get('cell_id')) is not None: + args['cell_id'] = cell_id + if (location := _dict.get('location')) is not None: + args['location'] = TableElementLocation.from_dict(location) + if (text := _dict.get('text')) is not None: + args['text'] = text + if (text_normalized := _dict.get('text_normalized')) is not None: + args['text_normalized'] = text_normalized + if (row_index_begin := _dict.get('row_index_begin')) is not None: + args['row_index_begin'] = row_index_begin + if (row_index_end := _dict.get('row_index_end')) is not None: + args['row_index_end'] = row_index_end + if (column_index_begin := _dict.get('column_index_begin')) is not None: + args['column_index_begin'] = column_index_begin + if (column_index_end := _dict.get('column_index_end')) is not None: + args['column_index_end'] = column_index_end + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TableColumnHeaders object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'cell_id') and self.cell_id is not None: + _dict['cell_id'] = self.cell_id + if hasattr(self, 'location') and self.location is not None: + if isinstance(self.location, dict): + _dict['location'] = self.location + else: + _dict['location'] = self.location.to_dict() + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, + 'text_normalized') and self.text_normalized is not None: + _dict['text_normalized'] = self.text_normalized + if hasattr(self, + 'row_index_begin') and self.row_index_begin is not None: + _dict['row_index_begin'] = self.row_index_begin + if hasattr(self, 'row_index_end') and self.row_index_end is not None: + _dict['row_index_end'] = self.row_index_end + if hasattr( + self, + 'column_index_begin') and self.column_index_begin is not None: + _dict['column_index_begin'] = self.column_index_begin + if hasattr(self, + 'column_index_end') and self.column_index_end is not None: + _dict['column_index_end'] = self.column_index_end + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TableColumnHeaders object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TableColumnHeaders') -> bool: + """Return 
`true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TableColumnHeaders') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TableElementLocation: + """ + The numeric location of the identified element in the document, represented with two + integers labeled `begin` and `end`. + + :param int begin: The element's `begin` index. + :param int end: The element's `end` index. + """ + + def __init__( + self, + begin: int, + end: int, + ) -> None: + """ + Initialize a TableElementLocation object. + + :param int begin: The element's `begin` index. + :param int end: The element's `end` index. + """ + self.begin = begin + self.end = end + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TableElementLocation': + """Initialize a TableElementLocation object from a json dictionary.""" + args = {} + if (begin := _dict.get('begin')) is not None: + args['begin'] = begin + else: + raise ValueError( + 'Required property \'begin\' not present in TableElementLocation JSON' + ) + if (end := _dict.get('end')) is not None: + args['end'] = end + else: + raise ValueError( + 'Required property \'end\' not present in TableElementLocation JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TableElementLocation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'begin') and self.begin is not None: + _dict['begin'] = self.begin + if hasattr(self, 'end') and self.end is not None: + _dict['end'] = self.end + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TableElementLocation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TableElementLocation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TableElementLocation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TableHeaders: + """ + The contents of the current table's header. + + :param str cell_id: (optional) The unique ID of the cell in the current table. + :param TableElementLocation location: (optional) The numeric location of the + identified element in the document, represented with two integers labeled + `begin` and `end`. + :param str text: (optional) The textual contents of the cell from the input + document without associated markup content. + :param int row_index_begin: (optional) The `begin` index of this cell's `row` + location in the current table. + :param int row_index_end: (optional) The `end` index of this cell's `row` + location in the current table. + :param int column_index_begin: (optional) The `begin` index of this cell's + `column` location in the current table. + :param int column_index_end: (optional) The `end` index of this cell's `column` + location in the current table. 
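Unlike the other table models here, `TableElementLocation` has required `begin` and `end` fields, which `from_dict` enforces. A minimal sketch with placeholder offsets, assuming `ibm_watson.discovery_v2` is importable:

from ibm_watson.discovery_v2 import TableElementLocation

loc = TableElementLocation.from_dict({'begin': 128, 'end': 256})
print(loc.end - loc.begin)                         # size of the span covered by the element

try:
    TableElementLocation.from_dict({'begin': 128})  # 'end' is missing
except ValueError as err:
    print(err)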
+ """ + + def __init__( + self, + *, + cell_id: Optional[str] = None, + location: Optional['TableElementLocation'] = None, + text: Optional[str] = None, + row_index_begin: Optional[int] = None, + row_index_end: Optional[int] = None, + column_index_begin: Optional[int] = None, + column_index_end: Optional[int] = None, + ) -> None: + """ + Initialize a TableHeaders object. + + :param str cell_id: (optional) The unique ID of the cell in the current + table. + :param TableElementLocation location: (optional) The numeric location of + the identified element in the document, represented with two integers + labeled `begin` and `end`. + :param str text: (optional) The textual contents of the cell from the input + document without associated markup content. + :param int row_index_begin: (optional) The `begin` index of this cell's + `row` location in the current table. + :param int row_index_end: (optional) The `end` index of this cell's `row` + location in the current table. + :param int column_index_begin: (optional) The `begin` index of this cell's + `column` location in the current table. + :param int column_index_end: (optional) The `end` index of this cell's + `column` location in the current table. + """ + self.cell_id = cell_id + self.location = location + self.text = text + self.row_index_begin = row_index_begin + self.row_index_end = row_index_end + self.column_index_begin = column_index_begin + self.column_index_end = column_index_end + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TableHeaders': + """Initialize a TableHeaders object from a json dictionary.""" + args = {} + if (cell_id := _dict.get('cell_id')) is not None: + args['cell_id'] = cell_id + if (location := _dict.get('location')) is not None: + args['location'] = TableElementLocation.from_dict(location) + if (text := _dict.get('text')) is not None: + args['text'] = text + if (row_index_begin := _dict.get('row_index_begin')) is not None: + args['row_index_begin'] = row_index_begin + if (row_index_end := _dict.get('row_index_end')) is not None: + args['row_index_end'] = row_index_end + if (column_index_begin := _dict.get('column_index_begin')) is not None: + args['column_index_begin'] = column_index_begin + if (column_index_end := _dict.get('column_index_end')) is not None: + args['column_index_end'] = column_index_end + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TableHeaders object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'cell_id') and self.cell_id is not None: + _dict['cell_id'] = self.cell_id + if hasattr(self, 'location') and self.location is not None: + if isinstance(self.location, dict): + _dict['location'] = self.location + else: + _dict['location'] = self.location.to_dict() + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, + 'row_index_begin') and self.row_index_begin is not None: + _dict['row_index_begin'] = self.row_index_begin + if hasattr(self, 'row_index_end') and self.row_index_end is not None: + _dict['row_index_end'] = self.row_index_end + if hasattr( + self, + 'column_index_begin') and self.column_index_begin is not None: + _dict['column_index_begin'] = self.column_index_begin + if hasattr(self, + 'column_index_end') and self.column_index_end is not None: + _dict['column_index_end'] = self.column_index_end + return _dict + + def _to_dict(self): + """Return a json dictionary 
representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TableHeaders object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TableHeaders') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TableHeaders') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TableKeyValuePairs: + """ + Key-value pairs detected across cell boundaries. + + :param TableCellKey key: (optional) A key in a key-value pair. + :param List[TableCellValues] value: (optional) A list of values in a key-value + pair. + """ + + def __init__( + self, + *, + key: Optional['TableCellKey'] = None, + value: Optional[List['TableCellValues']] = None, + ) -> None: + """ + Initialize a TableKeyValuePairs object. + + :param TableCellKey key: (optional) A key in a key-value pair. + :param List[TableCellValues] value: (optional) A list of values in a + key-value pair. + """ + self.key = key + self.value = value + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TableKeyValuePairs': + """Initialize a TableKeyValuePairs object from a json dictionary.""" + args = {} + if (key := _dict.get('key')) is not None: + args['key'] = TableCellKey.from_dict(key) + if (value := _dict.get('value')) is not None: + args['value'] = [TableCellValues.from_dict(v) for v in value] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TableKeyValuePairs object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'key') and self.key is not None: + if isinstance(self.key, dict): + _dict['key'] = self.key + else: + _dict['key'] = self.key.to_dict() + if hasattr(self, 'value') and self.value is not None: + value_list = [] + for v in self.value: + if isinstance(v, dict): + value_list.append(v) + else: + value_list.append(v.to_dict()) + _dict['value'] = value_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TableKeyValuePairs object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TableKeyValuePairs') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TableKeyValuePairs') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TableResultTable: + """ + Full table object retrieved from Table Understanding Enrichment. + + :param TableElementLocation location: (optional) The numeric location of the + identified element in the document, represented with two integers labeled + `begin` and `end`. + :param str text: (optional) The textual contents of the current table from the + input document without associated markup content. + :param TableTextLocation section_title: (optional) Text and associated location + within a table. + :param TableTextLocation title: (optional) Text and associated location within a + table. 
+ :param List[TableHeaders] table_headers: (optional) An array of table-level + cells that apply as headers to all the other cells in the current table. + :param List[TableRowHeaders] row_headers: (optional) An array of row-level + cells, each applicable as a header to other cells in the same row as itself, of + the current table. + :param List[TableColumnHeaders] column_headers: (optional) An array of + column-level cells, each applicable as a header to other cells in the same + column as itself, of the current table. + :param List[TableKeyValuePairs] key_value_pairs: (optional) An array of + key-value pairs identified in the current table. + :param List[TableBodyCells] body_cells: (optional) An array of cells that are + neither table header nor column header nor row header cells, of the current + table with corresponding row and column header associations. + :param List[TableTextLocation] contexts: (optional) An array of lists of textual + entries across the document related to the current table being parsed. + """ + + def __init__( + self, + *, + location: Optional['TableElementLocation'] = None, + text: Optional[str] = None, + section_title: Optional['TableTextLocation'] = None, + title: Optional['TableTextLocation'] = None, + table_headers: Optional[List['TableHeaders']] = None, + row_headers: Optional[List['TableRowHeaders']] = None, + column_headers: Optional[List['TableColumnHeaders']] = None, + key_value_pairs: Optional[List['TableKeyValuePairs']] = None, + body_cells: Optional[List['TableBodyCells']] = None, + contexts: Optional[List['TableTextLocation']] = None, + ) -> None: + """ + Initialize a TableResultTable object. + + :param TableElementLocation location: (optional) The numeric location of + the identified element in the document, represented with two integers + labeled `begin` and `end`. + :param str text: (optional) The textual contents of the current table from + the input document without associated markup content. + :param TableTextLocation section_title: (optional) Text and associated + location within a table. + :param TableTextLocation title: (optional) Text and associated location + within a table. + :param List[TableHeaders] table_headers: (optional) An array of table-level + cells that apply as headers to all the other cells in the current table. + :param List[TableRowHeaders] row_headers: (optional) An array of row-level + cells, each applicable as a header to other cells in the same row as + itself, of the current table. + :param List[TableColumnHeaders] column_headers: (optional) An array of + column-level cells, each applicable as a header to other cells in the same + column as itself, of the current table. + :param List[TableKeyValuePairs] key_value_pairs: (optional) An array of + key-value pairs identified in the current table. + :param List[TableBodyCells] body_cells: (optional) An array of cells that + are neither table header nor column header nor row header cells, of the + current table with corresponding row and column header associations. + :param List[TableTextLocation] contexts: (optional) An array of lists of + textual entries across the document related to the current table being + parsed. 
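A minimal sketch of walking a parsed table, assuming `ibm_watson.discovery_v2` is importable. `table_dict` is a deliberately simplified, made-up stand-in for a table object returned by the Table Understanding enrichment; real output also carries locations, row headers, and more cells:

from ibm_watson.discovery_v2 import TableResultTable

# Simplified illustrative input, not real enrichment output.
table_dict = {
    'column_headers': [
        {'cell_id': 'colHeader-1', 'text': 'Quarter'},
        {'cell_id': 'colHeader-2', 'text': 'Revenue'},
    ],
    'body_cells': [
        {'cell_id': 'bodyCell-1', 'text': 'Q1',
         'column_header_ids': ['colHeader-1'], 'column_header_texts': ['Quarter']},
        {'cell_id': 'bodyCell-2', 'text': '4.2M',
         'column_header_ids': ['colHeader-2'], 'column_header_texts': ['Revenue']},
    ],
    'key_value_pairs': [
        {'key': {'cell_id': 'key-1', 'text': 'Fiscal year'},
         'value': [{'cell_id': 'value-1', 'text': '2021'}]},
    ],
}

table = TableResultTable.from_dict(table_dict)

# Body cells carry their header associations directly.
for cell in table.body_cells or []:
    print(f"{', '.join(cell.column_header_texts or [])}: {cell.text}")

# Each key-value pair joins one TableCellKey with a list of TableCellValues.
for pair in table.key_value_pairs or []:
    values = '; '.join(v.text for v in (pair.value or []))
    print(f'{pair.key.text} -> {values}')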
+ """ + self.location = location + self.text = text + self.section_title = section_title + self.title = title + self.table_headers = table_headers + self.row_headers = row_headers + self.column_headers = column_headers + self.key_value_pairs = key_value_pairs + self.body_cells = body_cells + self.contexts = contexts + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TableResultTable': + """Initialize a TableResultTable object from a json dictionary.""" + args = {} + if (location := _dict.get('location')) is not None: + args['location'] = TableElementLocation.from_dict(location) + if (text := _dict.get('text')) is not None: + args['text'] = text + if (section_title := _dict.get('section_title')) is not None: + args['section_title'] = TableTextLocation.from_dict(section_title) + if (title := _dict.get('title')) is not None: + args['title'] = TableTextLocation.from_dict(title) + if (table_headers := _dict.get('table_headers')) is not None: + args['table_headers'] = [ + TableHeaders.from_dict(v) for v in table_headers + ] + if (row_headers := _dict.get('row_headers')) is not None: + args['row_headers'] = [ + TableRowHeaders.from_dict(v) for v in row_headers + ] + if (column_headers := _dict.get('column_headers')) is not None: + args['column_headers'] = [ + TableColumnHeaders.from_dict(v) for v in column_headers + ] + if (key_value_pairs := _dict.get('key_value_pairs')) is not None: + args['key_value_pairs'] = [ + TableKeyValuePairs.from_dict(v) for v in key_value_pairs + ] + if (body_cells := _dict.get('body_cells')) is not None: + args['body_cells'] = [ + TableBodyCells.from_dict(v) for v in body_cells + ] + if (contexts := _dict.get('contexts')) is not None: + args['contexts'] = [ + TableTextLocation.from_dict(v) for v in contexts + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TableResultTable object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'location') and self.location is not None: + if isinstance(self.location, dict): + _dict['location'] = self.location + else: + _dict['location'] = self.location.to_dict() + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'section_title') and self.section_title is not None: + if isinstance(self.section_title, dict): + _dict['section_title'] = self.section_title + else: + _dict['section_title'] = self.section_title.to_dict() + if hasattr(self, 'title') and self.title is not None: + if isinstance(self.title, dict): + _dict['title'] = self.title + else: + _dict['title'] = self.title.to_dict() + if hasattr(self, 'table_headers') and self.table_headers is not None: + table_headers_list = [] + for v in self.table_headers: + if isinstance(v, dict): + table_headers_list.append(v) + else: + table_headers_list.append(v.to_dict()) + _dict['table_headers'] = table_headers_list + if hasattr(self, 'row_headers') and self.row_headers is not None: + row_headers_list = [] + for v in self.row_headers: + if isinstance(v, dict): + row_headers_list.append(v) + else: + row_headers_list.append(v.to_dict()) + _dict['row_headers'] = row_headers_list + if hasattr(self, 'column_headers') and self.column_headers is not None: + column_headers_list = [] + for v in self.column_headers: + if isinstance(v, dict): + column_headers_list.append(v) + else: + column_headers_list.append(v.to_dict()) + _dict['column_headers'] = column_headers_list + if 
hasattr(self, + 'key_value_pairs') and self.key_value_pairs is not None: + key_value_pairs_list = [] + for v in self.key_value_pairs: + if isinstance(v, dict): + key_value_pairs_list.append(v) + else: + key_value_pairs_list.append(v.to_dict()) + _dict['key_value_pairs'] = key_value_pairs_list + if hasattr(self, 'body_cells') and self.body_cells is not None: + body_cells_list = [] + for v in self.body_cells: + if isinstance(v, dict): + body_cells_list.append(v) + else: + body_cells_list.append(v.to_dict()) + _dict['body_cells'] = body_cells_list + if hasattr(self, 'contexts') and self.contexts is not None: + contexts_list = [] + for v in self.contexts: + if isinstance(v, dict): + contexts_list.append(v) + else: + contexts_list.append(v.to_dict()) + _dict['contexts'] = contexts_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TableResultTable object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TableResultTable') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TableResultTable') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TableRowHeaders: + """ + Row-level cells, each applicable as a header to other cells in the same row as itself, + of the current table. + + :param str cell_id: (optional) The unique ID of the cell in the current table. + :param TableElementLocation location: (optional) The numeric location of the + identified element in the document, represented with two integers labeled + `begin` and `end`. + :param str text: (optional) The textual contents of this cell from the input + document without associated markup content. + :param str text_normalized: (optional) Normalized row header text. + :param int row_index_begin: (optional) The `begin` index of this cell's `row` + location in the current table. + :param int row_index_end: (optional) The `end` index of this cell's `row` + location in the current table. + :param int column_index_begin: (optional) The `begin` index of this cell's + `column` location in the current table. + :param int column_index_end: (optional) The `end` index of this cell's `column` + location in the current table. + """ + + def __init__( + self, + *, + cell_id: Optional[str] = None, + location: Optional['TableElementLocation'] = None, + text: Optional[str] = None, + text_normalized: Optional[str] = None, + row_index_begin: Optional[int] = None, + row_index_end: Optional[int] = None, + column_index_begin: Optional[int] = None, + column_index_end: Optional[int] = None, + ) -> None: + """ + Initialize a TableRowHeaders object. + + :param str cell_id: (optional) The unique ID of the cell in the current + table. + :param TableElementLocation location: (optional) The numeric location of + the identified element in the document, represented with two integers + labeled `begin` and `end`. + :param str text: (optional) The textual contents of this cell from the + input document without associated markup content. + :param str text_normalized: (optional) Normalized row header text. + :param int row_index_begin: (optional) The `begin` index of this cell's + `row` location in the current table. 
+ :param int row_index_end: (optional) The `end` index of this cell's `row` + location in the current table. + :param int column_index_begin: (optional) The `begin` index of this cell's + `column` location in the current table. + :param int column_index_end: (optional) The `end` index of this cell's + `column` location in the current table. + """ + self.cell_id = cell_id + self.location = location + self.text = text + self.text_normalized = text_normalized + self.row_index_begin = row_index_begin + self.row_index_end = row_index_end + self.column_index_begin = column_index_begin + self.column_index_end = column_index_end + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TableRowHeaders': + """Initialize a TableRowHeaders object from a json dictionary.""" + args = {} + if (cell_id := _dict.get('cell_id')) is not None: + args['cell_id'] = cell_id + if (location := _dict.get('location')) is not None: + args['location'] = TableElementLocation.from_dict(location) + if (text := _dict.get('text')) is not None: + args['text'] = text + if (text_normalized := _dict.get('text_normalized')) is not None: + args['text_normalized'] = text_normalized + if (row_index_begin := _dict.get('row_index_begin')) is not None: + args['row_index_begin'] = row_index_begin + if (row_index_end := _dict.get('row_index_end')) is not None: + args['row_index_end'] = row_index_end + if (column_index_begin := _dict.get('column_index_begin')) is not None: + args['column_index_begin'] = column_index_begin + if (column_index_end := _dict.get('column_index_end')) is not None: + args['column_index_end'] = column_index_end + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TableRowHeaders object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'cell_id') and self.cell_id is not None: + _dict['cell_id'] = self.cell_id + if hasattr(self, 'location') and self.location is not None: + if isinstance(self.location, dict): + _dict['location'] = self.location + else: + _dict['location'] = self.location.to_dict() + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, + 'text_normalized') and self.text_normalized is not None: + _dict['text_normalized'] = self.text_normalized + if hasattr(self, + 'row_index_begin') and self.row_index_begin is not None: + _dict['row_index_begin'] = self.row_index_begin + if hasattr(self, 'row_index_end') and self.row_index_end is not None: + _dict['row_index_end'] = self.row_index_end + if hasattr( + self, + 'column_index_begin') and self.column_index_begin is not None: + _dict['column_index_begin'] = self.column_index_begin + if hasattr(self, + 'column_index_end') and self.column_index_end is not None: + _dict['column_index_end'] = self.column_index_end + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TableRowHeaders object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TableRowHeaders') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TableRowHeaders') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class 
TableTextLocation: + """ + Text and associated location within a table. + + :param str text: (optional) The text retrieved. + :param TableElementLocation location: (optional) The numeric location of the + identified element in the document, represented with two integers labeled + `begin` and `end`. + """ + + def __init__( + self, + *, + text: Optional[str] = None, + location: Optional['TableElementLocation'] = None, + ) -> None: + """ + Initialize a TableTextLocation object. + + :param str text: (optional) The text retrieved. + :param TableElementLocation location: (optional) The numeric location of + the identified element in the document, represented with two integers + labeled `begin` and `end`. + """ + self.text = text + self.location = location + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TableTextLocation': + """Initialize a TableTextLocation object from a json dictionary.""" + args = {} + if (text := _dict.get('text')) is not None: + args['text'] = text + if (location := _dict.get('location')) is not None: + args['location'] = TableElementLocation.from_dict(location) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TableTextLocation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'location') and self.location is not None: + if isinstance(self.location, dict): + _dict['location'] = self.location + else: + _dict['location'] = self.location.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TableTextLocation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TableTextLocation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TableTextLocation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TrainingExample: + """ + Object that contains example response details for a training query. + + :param str document_id: The document ID associated with this training example. + :param str collection_id: The collection ID associated with this training + example. + :param int relevance: The relevance score of the training example. Scores range + from `0` to `100`. Zero means not relevant. The higher the number, the more + relevant the example. + :param datetime created: (optional) The date and time the example was created. + :param datetime updated: (optional) The date and time the example was updated. + """ + + def __init__( + self, + document_id: str, + collection_id: str, + relevance: int, + *, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: + """ + Initialize a TrainingExample object. + + :param str document_id: The document ID associated with this training + example. + :param str collection_id: The collection ID associated with this training + example. + :param int relevance: The relevance score of the training example. Scores + range from `0` to `100`. Zero means not relevant. The higher the number, + the more relevant the example. 
+ """ + self.document_id = document_id + self.collection_id = collection_id + self.relevance = relevance + self.created = created + self.updated = updated + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TrainingExample': + """Initialize a TrainingExample object from a json dictionary.""" + args = {} + if (document_id := _dict.get('document_id')) is not None: + args['document_id'] = document_id + else: + raise ValueError( + 'Required property \'document_id\' not present in TrainingExample JSON' + ) + if (collection_id := _dict.get('collection_id')) is not None: + args['collection_id'] = collection_id + else: + raise ValueError( + 'Required property \'collection_id\' not present in TrainingExample JSON' + ) + if (relevance := _dict.get('relevance')) is not None: + args['relevance'] = relevance + else: + raise ValueError( + 'Required property \'relevance\' not present in TrainingExample JSON' + ) + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TrainingExample object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'document_id') and self.document_id is not None: + _dict['document_id'] = self.document_id + if hasattr(self, 'collection_id') and self.collection_id is not None: + _dict['collection_id'] = self.collection_id + if hasattr(self, 'relevance') and self.relevance is not None: + _dict['relevance'] = self.relevance + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TrainingExample object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TrainingExample') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TrainingExample') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TrainingQuery: + """ + Object that contains training query details. + + :param str query_id: (optional) The query ID associated with the training query. + :param str natural_language_query: The natural text query that is used as the + training query. + :param str filter: (optional) The filter used on the collection before the + **natural_language_query** is applied. Only specify a filter if the documents + that you consider to be most relevant are not included in the top 100 results + when you submit test queries. If you specify a filter during training, apply the + same filter to queries that are submitted at runtime for optimal ranking + results. + :param datetime created: (optional) The date and time the query was created. + :param datetime updated: (optional) The date and time the query was updated. + :param List[TrainingExample] examples: Array of training examples. 
+ """ + + def __init__( + self, + natural_language_query: str, + examples: List['TrainingExample'], + *, + query_id: Optional[str] = None, + filter: Optional[str] = None, + created: Optional[datetime] = None, + updated: Optional[datetime] = None, + ) -> None: + """ + Initialize a TrainingQuery object. + + :param str natural_language_query: The natural text query that is used as + the training query. + :param List[TrainingExample] examples: Array of training examples. + :param str filter: (optional) The filter used on the collection before the + **natural_language_query** is applied. Only specify a filter if the + documents that you consider to be most relevant are not included in the top + 100 results when you submit test queries. If you specify a filter during + training, apply the same filter to queries that are submitted at runtime + for optimal ranking results. + """ + self.query_id = query_id + self.natural_language_query = natural_language_query + self.filter = filter + self.created = created + self.updated = updated + self.examples = examples + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TrainingQuery': + """Initialize a TrainingQuery object from a json dictionary.""" + args = {} + if (query_id := _dict.get('query_id')) is not None: + args['query_id'] = query_id + if (natural_language_query := + _dict.get('natural_language_query')) is not None: + args['natural_language_query'] = natural_language_query + else: + raise ValueError( + 'Required property \'natural_language_query\' not present in TrainingQuery JSON' + ) + if (filter := _dict.get('filter')) is not None: + args['filter'] = filter + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + if (updated := _dict.get('updated')) is not None: + args['updated'] = string_to_datetime(updated) + if (examples := _dict.get('examples')) is not None: + args['examples'] = [TrainingExample.from_dict(v) for v in examples] + else: + raise ValueError( + 'Required property \'examples\' not present in TrainingQuery JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TrainingQuery object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'query_id') and getattr(self, 'query_id') is not None: + _dict['query_id'] = getattr(self, 'query_id') + if hasattr(self, 'natural_language_query' + ) and self.natural_language_query is not None: + _dict['natural_language_query'] = self.natural_language_query + if hasattr(self, 'filter') and self.filter is not None: + _dict['filter'] = self.filter + if hasattr(self, 'created') and getattr(self, 'created') is not None: + _dict['created'] = datetime_to_string(getattr(self, 'created')) + if hasattr(self, 'updated') and getattr(self, 'updated') is not None: + _dict['updated'] = datetime_to_string(getattr(self, 'updated')) + if hasattr(self, 'examples') and self.examples is not None: + examples_list = [] + for v in self.examples: + if isinstance(v, dict): + examples_list.append(v) + else: + examples_list.append(v.to_dict()) + _dict['examples'] = examples_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TrainingQuery object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TrainingQuery') -> bool: + """Return `true` when self and 
other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TrainingQuery') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TrainingQuerySet: + """ + Object specifying the training queries contained in the identified training set. + + :param List[TrainingQuery] queries: (optional) Array of training queries. At + least 50 queries are required for training to begin. A maximum of 10,000 queries + are returned. + """ + + def __init__( + self, + *, + queries: Optional[List['TrainingQuery']] = None, + ) -> None: + """ + Initialize a TrainingQuerySet object. + + :param List[TrainingQuery] queries: (optional) Array of training queries. + At least 50 queries are required for training to begin. A maximum of 10,000 + queries are returned. + """ + self.queries = queries + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TrainingQuerySet': + """Initialize a TrainingQuerySet object from a json dictionary.""" + args = {} + if (queries := _dict.get('queries')) is not None: + args['queries'] = [TrainingQuery.from_dict(v) for v in queries] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TrainingQuerySet object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'queries') and self.queries is not None: + queries_list = [] + for v in self.queries: + if isinstance(v, dict): + queries_list.append(v) + else: + queries_list.append(v.to_dict()) + _dict['queries'] = queries_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TrainingQuerySet object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TrainingQuerySet') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TrainingQuerySet') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class UpdateDocumentClassifier: + """ + An object that contains a new name or description for a document classifier, updated + training data, or new or updated test data. + + :param str name: (optional) A new name for the classifier. + :param str description: (optional) A new description for the classifier. + """ + + def __init__( + self, + *, + name: Optional[str] = None, + description: Optional[str] = None, + ) -> None: + """ + Initialize a UpdateDocumentClassifier object. + + :param str name: (optional) A new name for the classifier. + :param str description: (optional) A new description for the classifier. 
+ """ + self.name = name + self.description = description + + @classmethod + def from_dict(cls, _dict: Dict) -> 'UpdateDocumentClassifier': + """Initialize a UpdateDocumentClassifier object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + if (description := _dict.get('description')) is not None: + args['description'] = description + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a UpdateDocumentClassifier object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this UpdateDocumentClassifier object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'UpdateDocumentClassifier') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'UpdateDocumentClassifier') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class WebhookHeader: + """ + An array of headers to pass with the HTTP request. Optional when `type` is `webhook`. + Not valid when creating any other type of enrichment. + + :param str name: The name of an HTTP header. + :param str value: The value of an HTTP header. + """ + + def __init__( + self, + name: str, + value: str, + ) -> None: + """ + Initialize a WebhookHeader object. + + :param str name: The name of an HTTP header. + :param str value: The value of an HTTP header. 
+ """ + self.name = name + self.value = value + + @classmethod + def from_dict(cls, _dict: Dict) -> 'WebhookHeader': + """Initialize a WebhookHeader object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in WebhookHeader JSON') + if (value := _dict.get('value')) is not None: + args['value'] = value + else: + raise ValueError( + 'Required property \'value\' not present in WebhookHeader JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a WebhookHeader object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this WebhookHeader object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'WebhookHeader') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'WebhookHeader') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class PullBatchesResponse: + """ + A compressed newline delimited JSON (NDJSON) file containing the document. The NDJSON + format is used to describe structured data. The file name format is + `{batch_id}.ndjson.gz`. For more information, see [Binary attachment from the pull + batches + method](/docs/discovery-data?topic=discovery-data-external-enrichment#binary-attachment-pull-batches). + + :param str file: (optional) A compressed NDJSON file containing the document. + """ + + def __init__( + self, + *, + file: Optional[str] = None, + ) -> None: + """ + Initialize a PullBatchesResponse object. + + :param str file: (optional) A compressed NDJSON file containing the + document. 
+ """ + self.file = file + + @classmethod + def from_dict(cls, _dict: Dict) -> 'PullBatchesResponse': + """Initialize a PullBatchesResponse object from a json dictionary.""" + args = {} + if (file := _dict.get('file')) is not None: + args['file'] = file + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a PullBatchesResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'file') and self.file is not None: + _dict['file'] = self.file + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this PullBatchesResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'PullBatchesResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'PullBatchesResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryAggregationQueryCalculationAggregation(QueryAggregation): + """ + Returns a scalar calculation across all documents for the field specified. Possible + calculations include min, max, sum, average, and unique_count. + + :param str type: (optional) Specifies the calculation type, such as 'average`, + `max`, `min`, `sum`, or `unique_count`. + :param str field: The field to perform the calculation on. + :param float value: (optional) The value of the calculation. + """ + + def __init__( + self, + field: str, + *, + type: Optional[str] = None, + value: Optional[float] = None, + ) -> None: + """ + Initialize a QueryAggregationQueryCalculationAggregation object. + + :param str field: The field to perform the calculation on. + :param str type: (optional) Specifies the calculation type, such as + 'average`, `max`, `min`, `sum`, or `unique_count`. + :param float value: (optional) The value of the calculation. 
+ """ + # pylint: disable=super-init-not-called + self.type = type + self.field = field + self.value = value + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'QueryAggregationQueryCalculationAggregation': + """Initialize a QueryAggregationQueryCalculationAggregation object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (field := _dict.get('field')) is not None: + args['field'] = field + else: + raise ValueError( + 'Required property \'field\' not present in QueryAggregationQueryCalculationAggregation JSON' + ) + if (value := _dict.get('value')) is not None: + args['value'] = value + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryAggregationQueryCalculationAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'field') and self.field is not None: + _dict['field'] = self.field + if hasattr(self, 'value') and self.value is not None: + _dict['value'] = self.value + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryAggregationQueryCalculationAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'QueryAggregationQueryCalculationAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'QueryAggregationQueryCalculationAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryAggregationQueryFilterAggregation(QueryAggregation): + """ + A modifier that narrows the document set of the subaggregations it precedes. + + :param str type: (optional) Specifies that the aggregation type is `filter`. + :param str match: The filter that is written in Discovery Query Language syntax + and is applied to the documents before subaggregations are run. + :param int matching_results: Number of documents that match the filter. + :param List[dict] aggregations: (optional) An array of subaggregations. + """ + + def __init__( + self, + match: str, + matching_results: int, + *, + type: Optional[str] = None, + aggregations: Optional[List[dict]] = None, + ) -> None: + """ + Initialize a QueryAggregationQueryFilterAggregation object. + + :param str match: The filter that is written in Discovery Query Language + syntax and is applied to the documents before subaggregations are run. + :param int matching_results: Number of documents that match the filter. + :param str type: (optional) Specifies that the aggregation type is + `filter`. + :param List[dict] aggregations: (optional) An array of subaggregations. 
+ """ + # pylint: disable=super-init-not-called + self.type = type + self.match = match + self.matching_results = matching_results + self.aggregations = aggregations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryAggregationQueryFilterAggregation': + """Initialize a QueryAggregationQueryFilterAggregation object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (match := _dict.get('match')) is not None: + args['match'] = match + else: + raise ValueError( + 'Required property \'match\' not present in QueryAggregationQueryFilterAggregation JSON' + ) + if (matching_results := _dict.get('matching_results')) is not None: + args['matching_results'] = matching_results + else: + raise ValueError( + 'Required property \'matching_results\' not present in QueryAggregationQueryFilterAggregation JSON' + ) + if (aggregations := _dict.get('aggregations')) is not None: + args['aggregations'] = aggregations + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryAggregationQueryFilterAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'match') and self.match is not None: + _dict['match'] = self.match + if hasattr(self, + 'matching_results') and self.matching_results is not None: + _dict['matching_results'] = self.matching_results + if hasattr(self, 'aggregations') and self.aggregations is not None: + _dict['aggregations'] = self.aggregations + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryAggregationQueryFilterAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryAggregationQueryFilterAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryAggregationQueryFilterAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryAggregationQueryGroupByAggregation(QueryAggregation): + """ + Separates document results into groups that meet the conditions you specify. + + :param str type: (optional) Specifies that the aggregation type is `group_by`. + :param List[QueryGroupByAggregationResult] results: (optional) An array of + results. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + results: Optional[List['QueryGroupByAggregationResult']] = None, + ) -> None: + """ + Initialize a QueryAggregationQueryGroupByAggregation object. + + :param str type: (optional) Specifies that the aggregation type is + `group_by`. + :param List[QueryGroupByAggregationResult] results: (optional) An array of + results. 
+ """ + # pylint: disable=super-init-not-called + self.type = type + self.results = results + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'QueryAggregationQueryGroupByAggregation': + """Initialize a QueryAggregationQueryGroupByAggregation object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (results := _dict.get('results')) is not None: + args['results'] = [ + QueryGroupByAggregationResult.from_dict(v) for v in results + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryAggregationQueryGroupByAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'results') and self.results is not None: + results_list = [] + for v in self.results: + if isinstance(v, dict): + results_list.append(v) + else: + results_list.append(v.to_dict()) + _dict['results'] = results_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryAggregationQueryGroupByAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryAggregationQueryGroupByAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryAggregationQueryGroupByAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryAggregationQueryHistogramAggregation(QueryAggregation): + """ + Numeric interval segments to categorize documents by using field values from a single + numeric field to describe the category. + + :param str type: (optional) Specifies that the aggregation type is `histogram`. + :param str field: The numeric field name used to create the histogram. + :param int interval: The size of the sections that the results are split into. + :param str name: (optional) Identifier that can optionally be specified in the + query request of this aggregation. + :param List[QueryHistogramAggregationResult] results: (optional) Array of + numeric intervals. + """ + + def __init__( + self, + field: str, + interval: int, + *, + type: Optional[str] = None, + name: Optional[str] = None, + results: Optional[List['QueryHistogramAggregationResult']] = None, + ) -> None: + """ + Initialize a QueryAggregationQueryHistogramAggregation object. + + :param str field: The numeric field name used to create the histogram. + :param int interval: The size of the sections that the results are split + into. + :param str type: (optional) Specifies that the aggregation type is + `histogram`. + :param str name: (optional) Identifier that can optionally be specified in + the query request of this aggregation. + :param List[QueryHistogramAggregationResult] results: (optional) Array of + numeric intervals. 
+ """ + # pylint: disable=super-init-not-called + self.type = type + self.field = field + self.interval = interval + self.name = name + self.results = results + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'QueryAggregationQueryHistogramAggregation': + """Initialize a QueryAggregationQueryHistogramAggregation object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (field := _dict.get('field')) is not None: + args['field'] = field + else: + raise ValueError( + 'Required property \'field\' not present in QueryAggregationQueryHistogramAggregation JSON' + ) + if (interval := _dict.get('interval')) is not None: + args['interval'] = interval + else: + raise ValueError( + 'Required property \'interval\' not present in QueryAggregationQueryHistogramAggregation JSON' + ) + if (name := _dict.get('name')) is not None: + args['name'] = name + if (results := _dict.get('results')) is not None: + args['results'] = [ + QueryHistogramAggregationResult.from_dict(v) for v in results + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryAggregationQueryHistogramAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'field') and self.field is not None: + _dict['field'] = self.field + if hasattr(self, 'interval') and self.interval is not None: + _dict['interval'] = self.interval + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'results') and self.results is not None: + results_list = [] + for v in self.results: + if isinstance(v, dict): + results_list.append(v) + else: + results_list.append(v.to_dict()) + _dict['results'] = results_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryAggregationQueryHistogramAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'QueryAggregationQueryHistogramAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'QueryAggregationQueryHistogramAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryAggregationQueryNestedAggregation(QueryAggregation): + """ + A restriction that alters the document set that is used by the aggregations that it + precedes. Subsequent aggregations are applied to nested documents from the specified + field. + + :param str type: (optional) Specifies that the aggregation type is `nested`. + :param str path: The path to the document field to scope subsequent aggregations + to. + :param int matching_results: Number of nested documents found in the specified + field. + :param List[dict] aggregations: (optional) An array of subaggregations. + """ + + def __init__( + self, + path: str, + matching_results: int, + *, + type: Optional[str] = None, + aggregations: Optional[List[dict]] = None, + ) -> None: + """ + Initialize a QueryAggregationQueryNestedAggregation object. 
+ + :param str path: The path to the document field to scope subsequent + aggregations to. + :param int matching_results: Number of nested documents found in the + specified field. + :param str type: (optional) Specifies that the aggregation type is + `nested`. + :param List[dict] aggregations: (optional) An array of subaggregations. + """ + # pylint: disable=super-init-not-called + self.type = type + self.path = path + self.matching_results = matching_results + self.aggregations = aggregations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryAggregationQueryNestedAggregation': + """Initialize a QueryAggregationQueryNestedAggregation object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (path := _dict.get('path')) is not None: + args['path'] = path + else: + raise ValueError( + 'Required property \'path\' not present in QueryAggregationQueryNestedAggregation JSON' + ) + if (matching_results := _dict.get('matching_results')) is not None: + args['matching_results'] = matching_results + else: + raise ValueError( + 'Required property \'matching_results\' not present in QueryAggregationQueryNestedAggregation JSON' + ) + if (aggregations := _dict.get('aggregations')) is not None: + args['aggregations'] = aggregations + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryAggregationQueryNestedAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'path') and self.path is not None: + _dict['path'] = self.path + if hasattr(self, + 'matching_results') and self.matching_results is not None: + _dict['matching_results'] = self.matching_results + if hasattr(self, 'aggregations') and self.aggregations is not None: + _dict['aggregations'] = self.aggregations + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryAggregationQueryNestedAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryAggregationQueryNestedAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryAggregationQueryNestedAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryAggregationQueryPairAggregation(QueryAggregation): + """ + Calculates relevancy values using combinations of document sets from results of the + specified pair of aggregations. + + :param str type: (optional) Specifies that the aggregation type is `pair`. + :param str first: (optional) Specifies the first aggregation in the pair. The + aggregation must be a `term`, `group_by`, `histogram`, or `timeslice` + aggregation type. + :param str second: (optional) Specifies the second aggregation in the pair. The + aggregation must be a `term`, `group_by`, `histogram`, or `timeslice` + aggregation type. + :param bool show_estimated_matching_results: (optional) Indicates whether to + include estimated matching result information. 
+ :param bool show_total_matching_documents: (optional) Indicates whether to + include total matching documents information. + :param List[QueryPairAggregationResult] results: (optional) An array of + aggregations. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + first: Optional[str] = None, + second: Optional[str] = None, + show_estimated_matching_results: Optional[bool] = None, + show_total_matching_documents: Optional[bool] = None, + results: Optional[List['QueryPairAggregationResult']] = None, + ) -> None: + """ + Initialize a QueryAggregationQueryPairAggregation object. + + :param str type: (optional) Specifies that the aggregation type is `pair`. + :param str first: (optional) Specifies the first aggregation in the pair. + The aggregation must be a `term`, `group_by`, `histogram`, or `timeslice` + aggregation type. + :param str second: (optional) Specifies the second aggregation in the pair. + The aggregation must be a `term`, `group_by`, `histogram`, or `timeslice` + aggregation type. + :param bool show_estimated_matching_results: (optional) Indicates whether + to include estimated matching result information. + :param bool show_total_matching_documents: (optional) Indicates whether to + include total matching documents information. + :param List[QueryPairAggregationResult] results: (optional) An array of + aggregations. + """ + # pylint: disable=super-init-not-called + self.type = type + self.first = first + self.second = second + self.show_estimated_matching_results = show_estimated_matching_results + self.show_total_matching_documents = show_total_matching_documents + self.results = results + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryAggregationQueryPairAggregation': + """Initialize a QueryAggregationQueryPairAggregation object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (first := _dict.get('first')) is not None: + args['first'] = first + if (second := _dict.get('second')) is not None: + args['second'] = second + if (show_estimated_matching_results := + _dict.get('show_estimated_matching_results')) is not None: + args[ + 'show_estimated_matching_results'] = show_estimated_matching_results + if (show_total_matching_documents := + _dict.get('show_total_matching_documents')) is not None: + args[ + 'show_total_matching_documents'] = show_total_matching_documents + if (results := _dict.get('results')) is not None: + args['results'] = [ + QueryPairAggregationResult.from_dict(v) for v in results + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryAggregationQueryPairAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'first') and self.first is not None: + _dict['first'] = self.first + if hasattr(self, 'second') and self.second is not None: + _dict['second'] = self.second + if hasattr(self, 'show_estimated_matching_results' + ) and self.show_estimated_matching_results is not None: + _dict[ + 'show_estimated_matching_results'] = self.show_estimated_matching_results + if hasattr(self, 'show_total_matching_documents' + ) and self.show_total_matching_documents is not None: + _dict[ + 'show_total_matching_documents'] = self.show_total_matching_documents + if hasattr(self, 'results') and self.results is not None: + 
results_list = [] + for v in self.results: + if isinstance(v, dict): + results_list.append(v) + else: + results_list.append(v.to_dict()) + _dict['results'] = results_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryAggregationQueryPairAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryAggregationQueryPairAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryAggregationQueryPairAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryAggregationQueryTermAggregation(QueryAggregation): + """ + Returns results from the field that is specified. + + :param str type: (optional) Specifies that the aggregation type is `term`. + :param str field: (optional) The field in the document where the values come + from. + :param int count: (optional) The number of results returned. Not returned if + `relevancy:true` is specified in the request. + :param str name: (optional) Identifier specified in the query request of this + aggregation. Not returned if `relevancy:true` is specified in the request. + :param List[QueryTermAggregationResult] results: (optional) An array of results. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + field: Optional[str] = None, + count: Optional[int] = None, + name: Optional[str] = None, + results: Optional[List['QueryTermAggregationResult']] = None, + ) -> None: + """ + Initialize a QueryAggregationQueryTermAggregation object. + + :param str type: (optional) Specifies that the aggregation type is `term`. + :param str field: (optional) The field in the document where the values + come from. + :param int count: (optional) The number of results returned. Not returned + if `relevancy:true` is specified in the request. + :param str name: (optional) Identifier specified in the query request of + this aggregation. Not returned if `relevancy:true` is specified in the + request. + :param List[QueryTermAggregationResult] results: (optional) An array of + results. 
+ """ + # pylint: disable=super-init-not-called + self.type = type + self.field = field + self.count = count + self.name = name + self.results = results + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryAggregationQueryTermAggregation': + """Initialize a QueryAggregationQueryTermAggregation object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (field := _dict.get('field')) is not None: + args['field'] = field + if (count := _dict.get('count')) is not None: + args['count'] = count + if (name := _dict.get('name')) is not None: + args['name'] = name + if (results := _dict.get('results')) is not None: + args['results'] = [ + QueryTermAggregationResult.from_dict(v) for v in results + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryAggregationQueryTermAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'field') and self.field is not None: + _dict['field'] = self.field + if hasattr(self, 'count') and self.count is not None: + _dict['count'] = self.count + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'results') and self.results is not None: + results_list = [] + for v in self.results: + if isinstance(v, dict): + results_list.append(v) + else: + results_list.append(v.to_dict()) + _dict['results'] = results_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryAggregationQueryTermAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryAggregationQueryTermAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryAggregationQueryTermAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryAggregationQueryTimesliceAggregation(QueryAggregation): + """ + A specialized histogram aggregation that uses dates to create interval segments. + + :param str type: (optional) Specifies that the aggregation type is `timeslice`. + :param str field: The date field name used to create the timeslice. + :param str interval: The date interval value. Valid values are seconds, minutes, + hours, days, weeks, and years. + :param str name: (optional) Identifier that can optionally be specified in the + query request of this aggregation. + :param List[QueryTimesliceAggregationResult] results: (optional) Array of + aggregation results. + """ + + def __init__( + self, + field: str, + interval: str, + *, + type: Optional[str] = None, + name: Optional[str] = None, + results: Optional[List['QueryTimesliceAggregationResult']] = None, + ) -> None: + """ + Initialize a QueryAggregationQueryTimesliceAggregation object. + + :param str field: The date field name used to create the timeslice. + :param str interval: The date interval value. Valid values are seconds, + minutes, hours, days, weeks, and years. + :param str type: (optional) Specifies that the aggregation type is + `timeslice`. 
+ :param str name: (optional) Identifier that can optionally be specified in + the query request of this aggregation. + :param List[QueryTimesliceAggregationResult] results: (optional) Array of + aggregation results. + """ + # pylint: disable=super-init-not-called + self.type = type + self.field = field + self.interval = interval + self.name = name + self.results = results + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'QueryAggregationQueryTimesliceAggregation': + """Initialize a QueryAggregationQueryTimesliceAggregation object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (field := _dict.get('field')) is not None: + args['field'] = field + else: + raise ValueError( + 'Required property \'field\' not present in QueryAggregationQueryTimesliceAggregation JSON' + ) + if (interval := _dict.get('interval')) is not None: + args['interval'] = interval + else: + raise ValueError( + 'Required property \'interval\' not present in QueryAggregationQueryTimesliceAggregation JSON' + ) + if (name := _dict.get('name')) is not None: + args['name'] = name + if (results := _dict.get('results')) is not None: + args['results'] = [ + QueryTimesliceAggregationResult.from_dict(v) for v in results + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryAggregationQueryTimesliceAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'field') and self.field is not None: + _dict['field'] = self.field + if hasattr(self, 'interval') and self.interval is not None: + _dict['interval'] = self.interval + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'results') and self.results is not None: + results_list = [] + for v in self.results: + if isinstance(v, dict): + results_list.append(v) + else: + results_list.append(v.to_dict()) + _dict['results'] = results_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryAggregationQueryTimesliceAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, + other: 'QueryAggregationQueryTimesliceAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, + other: 'QueryAggregationQueryTimesliceAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryAggregationQueryTopHitsAggregation(QueryAggregation): + """ + Returns the top documents ranked by the score of the query. + + :param str type: (optional) Specifies that the aggregation type is `top_hits`. + :param int size: The number of documents to return. + :param str name: (optional) Identifier specified in the query request of this + aggregation. + :param QueryTopHitsAggregationResult hits: (optional) A query response that + contains the matching documents for the preceding aggregations. 
+ """ + + def __init__( + self, + size: int, + *, + type: Optional[str] = None, + name: Optional[str] = None, + hits: Optional['QueryTopHitsAggregationResult'] = None, + ) -> None: + """ + Initialize a QueryAggregationQueryTopHitsAggregation object. + + :param int size: The number of documents to return. + :param str type: (optional) Specifies that the aggregation type is + `top_hits`. + :param str name: (optional) Identifier specified in the query request of + this aggregation. + :param QueryTopHitsAggregationResult hits: (optional) A query response that + contains the matching documents for the preceding aggregations. + """ + # pylint: disable=super-init-not-called + self.type = type + self.size = size + self.name = name + self.hits = hits + + @classmethod + def from_dict(cls, + _dict: Dict) -> 'QueryAggregationQueryTopHitsAggregation': + """Initialize a QueryAggregationQueryTopHitsAggregation object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (size := _dict.get('size')) is not None: + args['size'] = size + else: + raise ValueError( + 'Required property \'size\' not present in QueryAggregationQueryTopHitsAggregation JSON' + ) + if (name := _dict.get('name')) is not None: + args['name'] = name + if (hits := _dict.get('hits')) is not None: + args['hits'] = QueryTopHitsAggregationResult.from_dict(hits) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryAggregationQueryTopHitsAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'size') and self.size is not None: + _dict['size'] = self.size + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'hits') and self.hits is not None: + if isinstance(self.hits, dict): + _dict['hits'] = self.hits + else: + _dict['hits'] = self.hits.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryAggregationQueryTopHitsAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryAggregationQueryTopHitsAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryAggregationQueryTopHitsAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryAggregationQueryTopicAggregation(QueryAggregation): + """ + Detects how much the frequency of a given facet value deviates from the expected + average for the given time period. This aggregation type does not use data from + previous time periods. It calculates an index by using the averages of frequency + counts of other facet values for the given time period. + + :param str type: (optional) Specifies that the aggregation type is `topic`. + :param str facet: (optional) Specifies the `term` or `group_by` aggregation for + the facet that you want to analyze. + :param str time_segments: (optional) Specifies the `timeslice` aggregation that + defines the time segments. 
+ :param bool show_estimated_matching_results: (optional) Indicates whether to + include estimated matching result information. + :param bool show_total_matching_documents: (optional) Indicates whether to + include total matching documents information. + :param List[QueryTopicAggregationResult] results: (optional) An array of + aggregations. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + facet: Optional[str] = None, + time_segments: Optional[str] = None, + show_estimated_matching_results: Optional[bool] = None, + show_total_matching_documents: Optional[bool] = None, + results: Optional[List['QueryTopicAggregationResult']] = None, + ) -> None: + """ + Initialize a QueryAggregationQueryTopicAggregation object. + + :param str type: (optional) Specifies that the aggregation type is `topic`. + :param str facet: (optional) Specifies the `term` or `group_by` aggregation + for the facet that you want to analyze. + :param str time_segments: (optional) Specifies the `timeslice` aggregation + that defines the time segments. + :param bool show_estimated_matching_results: (optional) Indicates whether + to include estimated matching result information. + :param bool show_total_matching_documents: (optional) Indicates whether to + include total matching documents information. + :param List[QueryTopicAggregationResult] results: (optional) An array of + aggregations. + """ + # pylint: disable=super-init-not-called + self.type = type + self.facet = facet + self.time_segments = time_segments + self.show_estimated_matching_results = show_estimated_matching_results + self.show_total_matching_documents = show_total_matching_documents + self.results = results + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryAggregationQueryTopicAggregation': + """Initialize a QueryAggregationQueryTopicAggregation object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (facet := _dict.get('facet')) is not None: + args['facet'] = facet + if (time_segments := _dict.get('time_segments')) is not None: + args['time_segments'] = time_segments + if (show_estimated_matching_results := + _dict.get('show_estimated_matching_results')) is not None: + args[ + 'show_estimated_matching_results'] = show_estimated_matching_results + if (show_total_matching_documents := + _dict.get('show_total_matching_documents')) is not None: + args[ + 'show_total_matching_documents'] = show_total_matching_documents + if (results := _dict.get('results')) is not None: + args['results'] = [ + QueryTopicAggregationResult.from_dict(v) for v in results + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryAggregationQueryTopicAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'facet') and self.facet is not None: + _dict['facet'] = self.facet + if hasattr(self, 'time_segments') and self.time_segments is not None: + _dict['time_segments'] = self.time_segments + if hasattr(self, 'show_estimated_matching_results' + ) and self.show_estimated_matching_results is not None: + _dict[ + 'show_estimated_matching_results'] = self.show_estimated_matching_results + if hasattr(self, 'show_total_matching_documents' + ) and self.show_total_matching_documents is not None: + _dict[ + 'show_total_matching_documents'] = 
self.show_total_matching_documents + if hasattr(self, 'results') and self.results is not None: + results_list = [] + for v in self.results: + if isinstance(v, dict): + results_list.append(v) + else: + results_list.append(v.to_dict()) + _dict['results'] = results_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryAggregationQueryTopicAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryAggregationQueryTopicAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryAggregationQueryTopicAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class QueryAggregationQueryTrendAggregation(QueryAggregation): + """ + Detects sharp and unexpected changes in the frequency of a facet or facet value over + time based on the past history of frequency changes of the facet value. + + :param str type: (optional) Specifies that the aggregation type is `trend`. + :param str facet: (optional) Specifies the `term` or `group_by` aggregation for + the facet that you want to analyze. + :param str time_segments: (optional) Specifies the `timeslice` aggregation that + defines the time segments. + :param bool show_estimated_matching_results: (optional) Indicates whether to + include estimated matching result information. + :param bool show_total_matching_documents: (optional) Indicates whether to + include total matching documents information. + :param List[QueryTrendAggregationResult] results: (optional) An array of + aggregations. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + facet: Optional[str] = None, + time_segments: Optional[str] = None, + show_estimated_matching_results: Optional[bool] = None, + show_total_matching_documents: Optional[bool] = None, + results: Optional[List['QueryTrendAggregationResult']] = None, + ) -> None: + """ + Initialize a QueryAggregationQueryTrendAggregation object. + + :param str type: (optional) Specifies that the aggregation type is `trend`. + :param str facet: (optional) Specifies the `term` or `group_by` aggregation + for the facet that you want to analyze. + :param str time_segments: (optional) Specifies the `timeslice` aggregation + that defines the time segments. + :param bool show_estimated_matching_results: (optional) Indicates whether + to include estimated matching result information. + :param bool show_total_matching_documents: (optional) Indicates whether to + include total matching documents information. + :param List[QueryTrendAggregationResult] results: (optional) An array of + aggregations. 
+ """ + # pylint: disable=super-init-not-called + self.type = type + self.facet = facet + self.time_segments = time_segments + self.show_estimated_matching_results = show_estimated_matching_results + self.show_total_matching_documents = show_total_matching_documents + self.results = results + + @classmethod + def from_dict(cls, _dict: Dict) -> 'QueryAggregationQueryTrendAggregation': + """Initialize a QueryAggregationQueryTrendAggregation object from a json dictionary.""" + args = {} + if (type := _dict.get('type')) is not None: + args['type'] = type + if (facet := _dict.get('facet')) is not None: + args['facet'] = facet + if (time_segments := _dict.get('time_segments')) is not None: + args['time_segments'] = time_segments + if (show_estimated_matching_results := + _dict.get('show_estimated_matching_results')) is not None: + args[ + 'show_estimated_matching_results'] = show_estimated_matching_results + if (show_total_matching_documents := + _dict.get('show_total_matching_documents')) is not None: + args[ + 'show_total_matching_documents'] = show_total_matching_documents + if (results := _dict.get('results')) is not None: + args['results'] = [ + QueryTrendAggregationResult.from_dict(v) for v in results + ] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryAggregationQueryTrendAggregation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'facet') and self.facet is not None: + _dict['facet'] = self.facet + if hasattr(self, 'time_segments') and self.time_segments is not None: + _dict['time_segments'] = self.time_segments + if hasattr(self, 'show_estimated_matching_results' + ) and self.show_estimated_matching_results is not None: + _dict[ + 'show_estimated_matching_results'] = self.show_estimated_matching_results + if hasattr(self, 'show_total_matching_documents' + ) and self.show_total_matching_documents is not None: + _dict[ + 'show_total_matching_documents'] = self.show_total_matching_documents + if hasattr(self, 'results') and self.results is not None: + results_list = [] + for v in self.results: + if isinstance(v, dict): + results_list.append(v) + else: + results_list.append(v.to_dict()) + _dict['results'] = results_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this QueryAggregationQueryTrendAggregation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'QueryAggregationQueryTrendAggregation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'QueryAggregationQueryTrendAggregation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other diff --git a/ibm_watson/language_translator_v3.py b/ibm_watson/language_translator_v3.py deleted file mode 100644 index e9c8d189f..000000000 --- a/ibm_watson/language_translator_v3.py +++ /dev/null @@ -1,1019 +0,0 @@ -# coding: utf-8 - -# Copyright 2018 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -IBM Watson™ Language Translator translates text from one language to another. The -service offers multiple IBM provided translation models that you can customize based on -your unique terminology and language. Use Language Translator to take news from across the -globe and present it in your language, communicate with your customers in their own -language, and more. -""" - -from __future__ import absolute_import - -import json -from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService - -############################################################################## -# Service -############################################################################## - - -class LanguageTranslatorV3(BaseService): - """The Language Translator V3 service.""" - - default_url = 'https://gateway.watsonplatform.net/language-translator/api' - - def __init__( - self, - version, - url=default_url, - username=None, - password=None, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): - """ - Construct a new client for the Language Translator service. - - :param str version: The API version date to use with the service, in - "YYYY-MM-DD" format. Whenever the API is changed in a backwards - incompatible way, a new minor version of the API is released. - The service uses the API version for the date you specify, or - the most recent version before that date. Note that you should - not programmatically specify the current date at runtime, in - case the API has been updated since your application's release. - Instead, specify a version date that is compatible with your - application, and don't change it until your application is - ready for a later version. - - :param str url: The base url to use when contacting the service (e.g. - "https://gateway.watsonplatform.net/language-translator/api/language-translator/api"). - The base url may differ between IBM Cloud regions. - - :param str username: The username used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str password: The password used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. - - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. Defaults to - 'https://iam.cloud.ibm.com/identity/token'. 
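For reference while this module is removed, here is a minimal sketch of how the constructor documented above was typically called. The version date and API key are illustrative placeholders, not working values.

# Construct the (now removed) client with IAM credentials; both values are placeholders.
from ibm_watson.language_translator_v3 import LanguageTranslatorV3

language_translator = LanguageTranslatorV3(
    version='2018-05-01',          # placeholder API version date
    iam_apikey='YOUR_IAM_APIKEY',  # placeholder credential
)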
- """ - - BaseService.__init__( - self, - vcap_services_name='language_translator', - url=url, - username=username, - password=password, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Language Translator') - self.version = version - - ######################### - # Translation - ######################### - - def translate(self, text, model_id=None, source=None, target=None, - **kwargs): - """ - Translate. - - Translates the input text from the source language to the target language. - - :param list[str] text: Input text in UTF-8 encoding. Multiple entries will result - in multiple translations in the response. - :param str model_id: A globally unique string that identifies the underlying model - that is used for translation. - :param str source: Translation source language code. - :param str target: Translation target language code. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if text is None: - raise ValueError('text must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('language_translator', 'V3', 'translate') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = { - 'text': text, - 'model_id': model_id, - 'source': source, - 'target': target - } - - url = '/v3/translate' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - ######################### - # Identification - ######################### - - def identify(self, text, **kwargs): - """ - Identify language. - - Identifies the language of the input text. - - :param str text: Input text in UTF-8 format. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if text is None: - raise ValueError('text must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('language_translator', 'V3', 'identify') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = text - headers['content-type'] = 'text/plain' - - url = '/v3/identify' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - data=data, - accept_json=True) - return response - - def list_identifiable_languages(self, **kwargs): - """ - List identifiable languages. - - Lists the languages that the service can identify. Returns the language code (for - example, `en` for English or `es` for Spanish) and name of each language. - - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
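Assuming a `language_translator` client constructed as in the sketch above, the translate and identify methods just shown could be exercised roughly as follows. The model ID and sample text are illustrative, and `get_result()` is the standard DetailedResponse accessor from ibm_cloud_sdk_core.

# Translate a short phrase and identify the language of another; values are illustrative.
translation = language_translator.translate(
    text=['Hello, world'],
    model_id='en-es',  # illustrative translation model ID
).get_result()

identified = language_translator.identify(
    'Language Translator can identify the language of input text',
).get_result()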
- :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('language_translator', 'V3', - 'list_identifiable_languages') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v3/identifiable_languages' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - ######################### - # Models - ######################### - - def create_model(self, - base_model_id, - forced_glossary=None, - parallel_corpus=None, - name=None, - **kwargs): - """ - Create model. - - Uploads Translation Memory eXchange (TMX) files to customize a translation model. - You can either customize a model with a forced glossary or with a corpus that - contains parallel sentences. To create a model that is customized with a parallel - corpus and a forced glossary, proceed in two steps: customize with a - parallel corpus first and then customize the resulting model with a glossary. - Depending on the type of customization and the size of the uploaded corpora, - training can range from minutes for a glossary to several hours for a large - parallel corpus. You can upload a single forced glossary file and this file must - be less than 10 MB. You can upload multiple parallel corpora tmx files. The - cumulative file size of all uploaded files is limited to 250 MB. To - successfully train with a parallel corpus you must have at least 5,000 parallel - sentences in your corpus. - You can have a maxium of 10 custom models per language pair. - - :param str base_model_id: The model ID of the model to use as the base for - customization. To see available models, use the `List models` method. Usually all - IBM provided models are customizable. In addition, all your models that have been - created via parallel corpus customization, can be further customized with a forced - glossary. - :param file forced_glossary: A TMX file with your customizations. The - customizations in the file completely overwrite the domain translaton data, - including high frequency or high confidence phrase translations. You can upload - only one glossary with a file size less than 10 MB per call. A forced glossary - should contain single words or short phrases. - :param file parallel_corpus: A TMX file with parallel sentences for source and - target language. You can upload multiple parallel_corpus files in one request. All - uploaded parallel_corpus files combined, your parallel corpus must contain at - least 5,000 parallel sentences to train successfully. - :param str name: An optional model name that you can use to identify the model. - Valid characters are letters, numbers, dashes, underscores, spaces and - apostrophes. The maximum length is 32 characters. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if base_model_id is None: - raise ValueError('base_model_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('language_translator', 'V3', - 'create_model') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'base_model_id': base_model_id, - 'name': name - } - - form_data = {} - if forced_glossary: - form_data['forced_glossary'] = (None, forced_glossary, - 'application/octet-stream') - if parallel_corpus: - form_data['parallel_corpus'] = (None, parallel_corpus, - 'application/octet-stream') - - url = '/v3/models' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - def delete_model(self, model_id, **kwargs): - """ - Delete model. - - Deletes a custom translation model. - - :param str model_id: Model ID of the model to delete. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if model_id is None: - raise ValueError('model_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('language_translator', 'V3', - 'delete_model') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v3/models/{0}'.format(*self._encode_path_vars(model_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_model(self, model_id, **kwargs): - """ - Get model details. - - Gets information about a translation model, including training status for custom - models. Use this API call to poll the status of your customization request. A - successfully completed training will have a status of `available`. - - :param str model_id: Model ID of the model to get. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if model_id is None: - raise ValueError('model_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('language_translator', 'V3', 'get_model') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v3/models/{0}'.format(*self._encode_path_vars(model_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_models(self, - source=None, - target=None, - default_models=None, - **kwargs): - """ - List models. - - Lists available translation models. - - :param str source: Specify a language code to filter results by source language. - :param str target: Specify a language code to filter results by target language. - :param bool default_models: If the default parameter isn't specified, the service - will return all models (default and non-default) for each language pair. To return - only default models, set this to `true`. To return only non-default models, set - this to `false`. There is exactly one default model per language pair, the IBM - provided base model. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
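A common pattern with the customization methods above was to poll `get_model` until training finished, since the docstring notes that a completed training reports a status of `available`. A rough sketch follows; `language_translator` and `custom_model_id` are assumed to exist, and the polling interval is arbitrary.

import time

# Poll the custom model until its status reaches 'available' (see get_model above).
while True:
    model = language_translator.get_model(custom_model_id).get_result()
    if model.get('status') == 'available':
        break
    time.sleep(30)  # arbitrary polling interval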
- :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('language_translator', 'V3', - 'list_models') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'source': source, - 'target': target, - 'default': default_models - } - - url = '/v3/models' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - -############################################################################## -# Models -############################################################################## - - -class DeleteModelResult(object): - """ - DeleteModelResult. - - :attr str status: "OK" indicates that the model was successfully deleted. - """ - - def __init__(self, status): - """ - Initialize a DeleteModelResult object. - - :param str status: "OK" indicates that the model was successfully deleted. - """ - self.status = status - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DeleteModelResult object from a json dictionary.""" - args = {} - if 'status' in _dict: - args['status'] = _dict.get('status') - else: - raise ValueError( - 'Required property \'status\' not present in DeleteModelResult JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - return _dict - - def __str__(self): - """Return a `str` version of this DeleteModelResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class IdentifiableLanguage(object): - """ - IdentifiableLanguage. - - :attr str language: The language code for an identifiable language. - :attr str name: The name of the identifiable language. - """ - - def __init__(self, language, name): - """ - Initialize a IdentifiableLanguage object. - - :param str language: The language code for an identifiable language. - :param str name: The name of the identifiable language. 
- """ - self.language = language - self.name = name - - @classmethod - def _from_dict(cls, _dict): - """Initialize a IdentifiableLanguage object from a json dictionary.""" - args = {} - if 'language' in _dict: - args['language'] = _dict.get('language') - else: - raise ValueError( - 'Required property \'language\' not present in IdentifiableLanguage JSON' - ) - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in IdentifiableLanguage JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - return _dict - - def __str__(self): - """Return a `str` version of this IdentifiableLanguage object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class IdentifiableLanguages(object): - """ - IdentifiableLanguages. - - :attr list[IdentifiableLanguage] languages: A list of all languages that the service - can identify. - """ - - def __init__(self, languages): - """ - Initialize a IdentifiableLanguages object. - - :param list[IdentifiableLanguage] languages: A list of all languages that the - service can identify. - """ - self.languages = languages - - @classmethod - def _from_dict(cls, _dict): - """Initialize a IdentifiableLanguages object from a json dictionary.""" - args = {} - if 'languages' in _dict: - args['languages'] = [ - IdentifiableLanguage._from_dict(x) - for x in (_dict.get('languages')) - ] - else: - raise ValueError( - 'Required property \'languages\' not present in IdentifiableLanguages JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'languages') and self.languages is not None: - _dict['languages'] = [x._to_dict() for x in self.languages] - return _dict - - def __str__(self): - """Return a `str` version of this IdentifiableLanguages object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class IdentifiedLanguage(object): - """ - IdentifiedLanguage. - - :attr str language: The language code for an identified language. - :attr float confidence: The confidence score for the identified language. - """ - - def __init__(self, language, confidence): - """ - Initialize a IdentifiedLanguage object. - - :param str language: The language code for an identified language. - :param float confidence: The confidence score for the identified language. 
- """ - self.language = language - self.confidence = confidence - - @classmethod - def _from_dict(cls, _dict): - """Initialize a IdentifiedLanguage object from a json dictionary.""" - args = {} - if 'language' in _dict: - args['language'] = _dict.get('language') - else: - raise ValueError( - 'Required property \'language\' not present in IdentifiedLanguage JSON' - ) - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') - else: - raise ValueError( - 'Required property \'confidence\' not present in IdentifiedLanguage JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language - if hasattr(self, 'confidence') and self.confidence is not None: - _dict['confidence'] = self.confidence - return _dict - - def __str__(self): - """Return a `str` version of this IdentifiedLanguage object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class IdentifiedLanguages(object): - """ - IdentifiedLanguages. - - :attr list[IdentifiedLanguage] languages: A ranking of identified languages with - confidence scores. - """ - - def __init__(self, languages): - """ - Initialize a IdentifiedLanguages object. - - :param list[IdentifiedLanguage] languages: A ranking of identified languages with - confidence scores. - """ - self.languages = languages - - @classmethod - def _from_dict(cls, _dict): - """Initialize a IdentifiedLanguages object from a json dictionary.""" - args = {} - if 'languages' in _dict: - args['languages'] = [ - IdentifiedLanguage._from_dict(x) - for x in (_dict.get('languages')) - ] - else: - raise ValueError( - 'Required property \'languages\' not present in IdentifiedLanguages JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'languages') and self.languages is not None: - _dict['languages'] = [x._to_dict() for x in self.languages] - return _dict - - def __str__(self): - """Return a `str` version of this IdentifiedLanguages object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Translation(object): - """ - Translation. - - :attr str translation_output: Translation output in UTF-8. - """ - - def __init__(self, translation_output): - """ - Initialize a Translation object. - - :param str translation_output: Translation output in UTF-8. 
- """ - self.translation_output = translation_output - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Translation object from a json dictionary.""" - args = {} - if 'translation' in _dict or 'translation_output' in _dict: - args['translation_output'] = _dict.get('translation') or _dict.get( - 'translation_output') - else: - raise ValueError( - 'Required property \'translation\' not present in Translation JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr( - self, - 'translation_output') and self.translation_output is not None: - _dict['translation'] = self.translation_output - return _dict - - def __str__(self): - """Return a `str` version of this Translation object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TranslationModel(object): - """ - Response payload for models. - - :attr str model_id: A globally unique string that identifies the underlying model that - is used for translation. - :attr str name: (optional) Optional name that can be specified when the model is - created. - :attr str source: (optional) Translation source language code. - :attr str target: (optional) Translation target language code. - :attr str base_model_id: (optional) Model ID of the base model that was used to - customize the model. If the model is not a custom model, this will be an empty string. - :attr str domain: (optional) The domain of the translation model. - :attr bool customizable: (optional) Whether this model can be used as a base for - customization. Customized models are not further customizable, and some base models - are not customizable. - :attr bool default_model: (optional) Whether or not the model is a default model. A - default model is the model for a given language pair that will be used when that - language pair is specified in the source and target parameters. - :attr str owner: (optional) Either an empty string, indicating the model is not a - custom model, or the ID of the service instance that created the model. - :attr str status: (optional) Availability of a model. - """ - - def __init__(self, - model_id, - name=None, - source=None, - target=None, - base_model_id=None, - domain=None, - customizable=None, - default_model=None, - owner=None, - status=None): - """ - Initialize a TranslationModel object. - - :param str model_id: A globally unique string that identifies the underlying model - that is used for translation. - :param str name: (optional) Optional name that can be specified when the model is - created. - :param str source: (optional) Translation source language code. - :param str target: (optional) Translation target language code. - :param str base_model_id: (optional) Model ID of the base model that was used to - customize the model. If the model is not a custom model, this will be an empty - string. - :param str domain: (optional) The domain of the translation model. - :param bool customizable: (optional) Whether this model can be used as a base for - customization. Customized models are not further customizable, and some base - models are not customizable. 
- :param bool default_model: (optional) Whether or not the model is a default model. - A default model is the model for a given language pair that will be used when that - language pair is specified in the source and target parameters. - :param str owner: (optional) Either an empty string, indicating the model is not a - custom model, or the ID of the service instance that created the model. - :param str status: (optional) Availability of a model. - """ - self.model_id = model_id - self.name = name - self.source = source - self.target = target - self.base_model_id = base_model_id - self.domain = domain - self.customizable = customizable - self.default_model = default_model - self.owner = owner - self.status = status - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TranslationModel object from a json dictionary.""" - args = {} - if 'model_id' in _dict: - args['model_id'] = _dict.get('model_id') - else: - raise ValueError( - 'Required property \'model_id\' not present in TranslationModel JSON' - ) - if 'name' in _dict: - args['name'] = _dict.get('name') - if 'source' in _dict: - args['source'] = _dict.get('source') - if 'target' in _dict: - args['target'] = _dict.get('target') - if 'base_model_id' in _dict: - args['base_model_id'] = _dict.get('base_model_id') - if 'domain' in _dict: - args['domain'] = _dict.get('domain') - if 'customizable' in _dict: - args['customizable'] = _dict.get('customizable') - if 'default_model' in _dict: - args['default_model'] = _dict.get('default_model') - if 'owner' in _dict: - args['owner'] = _dict.get('owner') - if 'status' in _dict: - args['status'] = _dict.get('status') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'model_id') and self.model_id is not None: - _dict['model_id'] = self.model_id - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'source') and self.source is not None: - _dict['source'] = self.source - if hasattr(self, 'target') and self.target is not None: - _dict['target'] = self.target - if hasattr(self, 'base_model_id') and self.base_model_id is not None: - _dict['base_model_id'] = self.base_model_id - if hasattr(self, 'domain') and self.domain is not None: - _dict['domain'] = self.domain - if hasattr(self, 'customizable') and self.customizable is not None: - _dict['customizable'] = self.customizable - if hasattr(self, 'default_model') and self.default_model is not None: - _dict['default_model'] = self.default_model - if hasattr(self, 'owner') and self.owner is not None: - _dict['owner'] = self.owner - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - return _dict - - def __str__(self): - """Return a `str` version of this TranslationModel object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TranslationModels(object): - """ - The response type for listing existing translation models. - - :attr list[TranslationModel] models: An array of available models. - """ - - def __init__(self, models): - """ - Initialize a TranslationModels object. - - :param list[TranslationModel] models: An array of available models. 
- """ - self.models = models - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TranslationModels object from a json dictionary.""" - args = {} - if 'models' in _dict: - args['models'] = [ - TranslationModel._from_dict(x) for x in (_dict.get('models')) - ] - else: - raise ValueError( - 'Required property \'models\' not present in TranslationModels JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'models') and self.models is not None: - _dict['models'] = [x._to_dict() for x in self.models] - return _dict - - def __str__(self): - """Return a `str` version of this TranslationModels object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class TranslationResult(object): - """ - TranslationResult. - - :attr int word_count: Number of words in the input text. - :attr int character_count: Number of characters in the input text. - :attr list[Translation] translations: List of translation output in UTF-8, - corresponding to the input text entries. - """ - - def __init__(self, word_count, character_count, translations): - """ - Initialize a TranslationResult object. - - :param int word_count: Number of words in the input text. - :param int character_count: Number of characters in the input text. - :param list[Translation] translations: List of translation output in UTF-8, - corresponding to the input text entries. - """ - self.word_count = word_count - self.character_count = character_count - self.translations = translations - - @classmethod - def _from_dict(cls, _dict): - """Initialize a TranslationResult object from a json dictionary.""" - args = {} - if 'word_count' in _dict: - args['word_count'] = _dict.get('word_count') - else: - raise ValueError( - 'Required property \'word_count\' not present in TranslationResult JSON' - ) - if 'character_count' in _dict: - args['character_count'] = _dict.get('character_count') - else: - raise ValueError( - 'Required property \'character_count\' not present in TranslationResult JSON' - ) - if 'translations' in _dict: - args['translations'] = [ - Translation._from_dict(x) for x in (_dict.get('translations')) - ] - else: - raise ValueError( - 'Required property \'translations\' not present in TranslationResult JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'word_count') and self.word_count is not None: - _dict['word_count'] = self.word_count - if hasattr(self, - 'character_count') and self.character_count is not None: - _dict['character_count'] = self.character_count - if hasattr(self, 'translations') and self.translations is not None: - _dict['translations'] = [x._to_dict() for x in self.translations] - return _dict - - def __str__(self): - """Return a `str` version of this TranslationResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, 
false otherwise.""" - return not self == other diff --git a/ibm_watson/natural_language_classifier_v1.py b/ibm_watson/natural_language_classifier_v1.py deleted file mode 100644 index 779ce2c01..000000000 --- a/ibm_watson/natural_language_classifier_v1.py +++ /dev/null @@ -1,776 +0,0 @@ -# coding: utf-8 - -# Copyright 2018 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -IBM Watson™ Natural Language Classifier uses machine learning algorithms to return -the top matching predefined classes for short text input. You create and train a -classifier to connect predefined classes to example texts so that the service can apply -those classes to new inputs. -""" - -from __future__ import absolute_import - -import json -from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService -from ibm_cloud_sdk_core import datetime_to_string, string_to_datetime - -############################################################################## -# Service -############################################################################## - - -class NaturalLanguageClassifierV1(BaseService): - """The Natural Language Classifier V1 service.""" - - default_url = 'https://gateway.watsonplatform.net/natural-language-classifier/api' - - def __init__( - self, - url=default_url, - username=None, - password=None, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): - """ - Construct a new client for the Natural Language Classifier service. - - :param str url: The base url to use when contacting the service (e.g. - "https://gateway.watsonplatform.net/natural-language-classifier/api/natural-language-classifier/api"). - The base url may differ between IBM Cloud regions. - - :param str username: The username used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str password: The password used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of BlueIBM Cloudmix. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. - - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. Defaults to - 'https://iam.cloud.ibm.com/identity/token'. 
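As with the Language Translator module above, here is a minimal sketch of how this removed client's constructor was typically called, using a placeholder API key.

# The removed NLC client took no version date; only credentials were required.
from ibm_watson.natural_language_classifier_v1 import NaturalLanguageClassifierV1

natural_language_classifier = NaturalLanguageClassifierV1(
    iam_apikey='YOUR_IAM_APIKEY',  # placeholder credential
)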
- """ - - BaseService.__init__( - self, - vcap_services_name='natural_language_classifier', - url=url, - username=username, - password=password, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Natural Language Classifier') - - ######################### - # Classify text - ######################### - - def classify(self, classifier_id, text, **kwargs): - """ - Classify a phrase. - - Returns label information for the input. The status must be `Available` before you - can use the classifier to classify text. - - :param str classifier_id: Classifier ID to use. - :param str text: The submitted phrase. The maximum length is 2048 characters. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if classifier_id is None: - raise ValueError('classifier_id must be provided') - if text is None: - raise ValueError('text must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('natural_language_classifier', 'V1', - 'classify') - headers.update(sdk_headers) - - data = {'text': text} - - url = '/v1/classifiers/{0}/classify'.format( - *self._encode_path_vars(classifier_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - json=data, - accept_json=True) - return response - - def classify_collection(self, classifier_id, collection, **kwargs): - """ - Classify multiple phrases. - - Returns label information for multiple phrases. The status must be `Available` - before you can use the classifier to classify text. - Note that classifying Japanese texts is a beta feature. - - :param str classifier_id: Classifier ID to use. - :param list[ClassifyInput] collection: The submitted phrases. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if classifier_id is None: - raise ValueError('classifier_id must be provided') - if collection is None: - raise ValueError('collection must be provided') - collection = [self._convert_model(x, ClassifyInput) for x in collection] - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('natural_language_classifier', 'V1', - 'classify_collection') - headers.update(sdk_headers) - - data = {'collection': collection} - - url = '/v1/classifiers/{0}/classify_collection'.format( - *self._encode_path_vars(classifier_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - json=data, - accept_json=True) - return response - - ######################### - # Manage classifiers - ######################### - - def create_classifier(self, metadata, training_data, **kwargs): - """ - Create classifier. - - Sends data to create and train a classifier and returns information about the new - classifier. - - :param file metadata: Metadata in JSON format. The metadata identifies the - language of the data, and an optional name to identify the classifier. Specify the - language with the 2-letter primary language code as assigned in ISO standard 639. - Supported languages are English (`en`), Arabic (`ar`), French (`fr`), German, - (`de`), Italian (`it`), Japanese (`ja`), Korean (`ko`), Brazilian Portuguese - (`pt`), and Spanish (`es`). 
- :param file training_data: Training data in CSV format. Each text value must have - at least one class. The data can include up to 3,000 classes and 20,000 records. - For details, see [Data - preparation](https://cloud.ibm.com/docs/services/natural-language-classifier/using-your-data.html). - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if metadata is None: - raise ValueError('metadata must be provided') - if training_data is None: - raise ValueError('training_data must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('natural_language_classifier', 'V1', - 'create_classifier') - headers.update(sdk_headers) - - form_data = {} - form_data['training_metadata'] = (None, metadata, 'application/json') - form_data['training_data'] = (None, training_data, 'text/csv') - - url = '/v1/classifiers' - response = self.request( - method='POST', - url=url, - headers=headers, - files=form_data, - accept_json=True) - return response - - def delete_classifier(self, classifier_id, **kwargs): - """ - Delete classifier. - - :param str classifier_id: Classifier ID to delete. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if classifier_id is None: - raise ValueError('classifier_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('natural_language_classifier', 'V1', - 'delete_classifier') - headers.update(sdk_headers) - - url = '/v1/classifiers/{0}'.format( - *self._encode_path_vars(classifier_id)) - response = self.request( - method='DELETE', url=url, headers=headers, accept_json=True) - return response - - def get_classifier(self, classifier_id, **kwargs): - """ - Get information about a classifier. - - Returns status and other information about a classifier. - - :param str classifier_id: Classifier ID to query. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if classifier_id is None: - raise ValueError('classifier_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('natural_language_classifier', 'V1', - 'get_classifier') - headers.update(sdk_headers) - - url = '/v1/classifiers/{0}'.format( - *self._encode_path_vars(classifier_id)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) - return response - - def list_classifiers(self, **kwargs): - """ - List classifiers. - - Returns an empty array if no classifiers are available. - - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
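Assuming the `natural_language_classifier` client sketched above and an already trained classifier, the classify method shown earlier could be called roughly as follows; the classifier ID and phrase are illustrative placeholders.

# Classify a single phrase against an existing classifier; both values are placeholders.
classification = natural_language_classifier.classify(
    classifier_id='10D41B-nlc-1',  # illustrative classifier ID
    text='How hot will it be today?',
).get_result()
print(classification.get('top_class'))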
- :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('natural_language_classifier', 'V1', - 'list_classifiers') - headers.update(sdk_headers) - - url = '/v1/classifiers' - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) - return response - - -############################################################################## -# Models -############################################################################## - - -class Classification(object): - """ - Response from the classifier for a phrase. - - :attr str classifier_id: (optional) Unique identifier for this classifier. - :attr str url: (optional) Link to the classifier. - :attr str text: (optional) The submitted phrase. - :attr str top_class: (optional) The class with the highest confidence. - :attr list[ClassifiedClass] classes: (optional) An array of up to ten class-confidence - pairs sorted in descending order of confidence. - """ - - def __init__(self, - classifier_id=None, - url=None, - text=None, - top_class=None, - classes=None): - """ - Initialize a Classification object. - - :param str classifier_id: (optional) Unique identifier for this classifier. - :param str url: (optional) Link to the classifier. - :param str text: (optional) The submitted phrase. - :param str top_class: (optional) The class with the highest confidence. - :param list[ClassifiedClass] classes: (optional) An array of up to ten - class-confidence pairs sorted in descending order of confidence. - """ - self.classifier_id = classifier_id - self.url = url - self.text = text - self.top_class = top_class - self.classes = classes - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Classification object from a json dictionary.""" - args = {} - if 'classifier_id' in _dict: - args['classifier_id'] = _dict.get('classifier_id') - if 'url' in _dict: - args['url'] = _dict.get('url') - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'top_class' in _dict: - args['top_class'] = _dict.get('top_class') - if 'classes' in _dict: - args['classes'] = [ - ClassifiedClass._from_dict(x) for x in (_dict.get('classes')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'classifier_id') and self.classifier_id is not None: - _dict['classifier_id'] = self.classifier_id - if hasattr(self, 'url') and self.url is not None: - _dict['url'] = self.url - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'top_class') and self.top_class is not None: - _dict['top_class'] = self.top_class - if hasattr(self, 'classes') and self.classes is not None: - _dict['classes'] = [x._to_dict() for x in self.classes] - return _dict - - def __str__(self): - """Return a `str` version of this Classification object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ClassificationCollection(object): - """ - Response from the classifier for multiple phrases. - - :attr str classifier_id: (optional) Unique identifier for this classifier. - :attr str url: (optional) Link to the classifier. 
- :attr list[CollectionItem] collection: (optional) An array of classifier responses for - each submitted phrase. - """ - - def __init__(self, classifier_id=None, url=None, collection=None): - """ - Initialize a ClassificationCollection object. - - :param str classifier_id: (optional) Unique identifier for this classifier. - :param str url: (optional) Link to the classifier. - :param list[CollectionItem] collection: (optional) An array of classifier - responses for each submitted phrase. - """ - self.classifier_id = classifier_id - self.url = url - self.collection = collection - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ClassificationCollection object from a json dictionary.""" - args = {} - if 'classifier_id' in _dict: - args['classifier_id'] = _dict.get('classifier_id') - if 'url' in _dict: - args['url'] = _dict.get('url') - if 'collection' in _dict: - args['collection'] = [ - CollectionItem._from_dict(x) for x in (_dict.get('collection')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'classifier_id') and self.classifier_id is not None: - _dict['classifier_id'] = self.classifier_id - if hasattr(self, 'url') and self.url is not None: - _dict['url'] = self.url - if hasattr(self, 'collection') and self.collection is not None: - _dict['collection'] = [x._to_dict() for x in self.collection] - return _dict - - def __str__(self): - """Return a `str` version of this ClassificationCollection object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ClassifiedClass(object): - """ - Class and confidence. - - :attr float confidence: (optional) A decimal percentage that represents the confidence - that Watson has in this class. Higher values represent higher confidences. - :attr str class_name: (optional) Class label. - """ - - def __init__(self, confidence=None, class_name=None): - """ - Initialize a ClassifiedClass object. - - :param float confidence: (optional) A decimal percentage that represents the - confidence that Watson has in this class. Higher values represent higher - confidences. - :param str class_name: (optional) Class label. 
- """ - self.confidence = confidence - self.class_name = class_name - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ClassifiedClass object from a json dictionary.""" - args = {} - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') - if 'class_name' in _dict: - args['class_name'] = _dict.get('class_name') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'confidence') and self.confidence is not None: - _dict['confidence'] = self.confidence - if hasattr(self, 'class_name') and self.class_name is not None: - _dict['class_name'] = self.class_name - return _dict - - def __str__(self): - """Return a `str` version of this ClassifiedClass object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Classifier(object): - """ - A classifier for natural language phrases. - - :attr str name: (optional) User-supplied name for the classifier. - :attr str url: Link to the classifier. - :attr str status: (optional) The state of the classifier. - :attr str classifier_id: Unique identifier for this classifier. - :attr datetime created: (optional) Date and time (UTC) the classifier was created. - :attr str status_description: (optional) Additional detail about the status. - :attr str language: (optional) The language used for the classifier. - """ - - def __init__(self, - url, - classifier_id, - name=None, - status=None, - created=None, - status_description=None, - language=None): - """ - Initialize a Classifier object. - - :param str url: Link to the classifier. - :param str classifier_id: Unique identifier for this classifier. - :param str name: (optional) User-supplied name for the classifier. - :param str status: (optional) The state of the classifier. - :param datetime created: (optional) Date and time (UTC) the classifier was - created. - :param str status_description: (optional) Additional detail about the status. - :param str language: (optional) The language used for the classifier. 
- """ - self.name = name - self.url = url - self.status = status - self.classifier_id = classifier_id - self.created = created - self.status_description = status_description - self.language = language - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Classifier object from a json dictionary.""" - args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') - if 'url' in _dict: - args['url'] = _dict.get('url') - else: - raise ValueError( - 'Required property \'url\' not present in Classifier JSON') - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'classifier_id' in _dict: - args['classifier_id'] = _dict.get('classifier_id') - else: - raise ValueError( - 'Required property \'classifier_id\' not present in Classifier JSON' - ) - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'status_description' in _dict: - args['status_description'] = _dict.get('status_description') - if 'language' in _dict: - args['language'] = _dict.get('language') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'url') and self.url is not None: - _dict['url'] = self.url - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'classifier_id') and self.classifier_id is not None: - _dict['classifier_id'] = self.classifier_id - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr( - self, - 'status_description') and self.status_description is not None: - _dict['status_description'] = self.status_description - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language - return _dict - - def __str__(self): - """Return a `str` version of this Classifier object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ClassifierList(object): - """ - List of available classifiers. - - :attr list[Classifier] classifiers: The classifiers available to the user. Returns an - empty array if no classifiers are available. - """ - - def __init__(self, classifiers): - """ - Initialize a ClassifierList object. - - :param list[Classifier] classifiers: The classifiers available to the user. - Returns an empty array if no classifiers are available. 
- """ - self.classifiers = classifiers - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ClassifierList object from a json dictionary.""" - args = {} - if 'classifiers' in _dict: - args['classifiers'] = [ - Classifier._from_dict(x) for x in (_dict.get('classifiers')) - ] - else: - raise ValueError( - 'Required property \'classifiers\' not present in ClassifierList JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'classifiers') and self.classifiers is not None: - _dict['classifiers'] = [x._to_dict() for x in self.classifiers] - return _dict - - def __str__(self): - """Return a `str` version of this ClassifierList object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ClassifyInput(object): - """ - Request payload to classify. - - :attr str text: The submitted phrase. The maximum length is 2048 characters. - """ - - def __init__(self, text): - """ - Initialize a ClassifyInput object. - - :param str text: The submitted phrase. The maximum length is 2048 characters. - """ - self.text = text - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ClassifyInput object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - else: - raise ValueError( - 'Required property \'text\' not present in ClassifyInput JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - return _dict - - def __str__(self): - """Return a `str` version of this ClassifyInput object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class CollectionItem(object): - """ - Response from the classifier for a phrase in a collection. - - :attr str text: (optional) The submitted phrase. The maximum length is 2048 - characters. - :attr str top_class: (optional) The class with the highest confidence. - :attr list[ClassifiedClass] classes: (optional) An array of up to ten class-confidence - pairs sorted in descending order of confidence. - """ - - def __init__(self, text=None, top_class=None, classes=None): - """ - Initialize a CollectionItem object. - - :param str text: (optional) The submitted phrase. The maximum length is 2048 - characters. - :param str top_class: (optional) The class with the highest confidence. - :param list[ClassifiedClass] classes: (optional) An array of up to ten - class-confidence pairs sorted in descending order of confidence. 
- """ - self.text = text - self.top_class = top_class - self.classes = classes - - @classmethod - def _from_dict(cls, _dict): - """Initialize a CollectionItem object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'top_class' in _dict: - args['top_class'] = _dict.get('top_class') - if 'classes' in _dict: - args['classes'] = [ - ClassifiedClass._from_dict(x) for x in (_dict.get('classes')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'top_class') and self.top_class is not None: - _dict['top_class'] = self.top_class - if hasattr(self, 'classes') and self.classes is not None: - _dict['classes'] = [x._to_dict() for x in self.classes] - return _dict - - def __str__(self): - """Return a `str` version of this CollectionItem object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other diff --git a/ibm_watson/natural_language_understanding_v1.py b/ibm_watson/natural_language_understanding_v1.py index e537020f4..c3561e275 100644 --- a/ibm_watson/natural_language_understanding_v1.py +++ b/ibm_watson/natural_language_understanding_v1.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2017, 2024. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,23 +13,33 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +# IBM OpenAPI SDK Code Generator Version: 3.97.0-0e90eab1-20241120-170029 """ Analyze various features of text content at scale. Provide text, raw HTML, or a public URL and IBM Watson Natural Language Understanding will give you results for the features you request. The service cleans HTML content before analysis by default, so the results can ignore most advertisements and other unwanted content. You can create [custom -models](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html) -with Watson Knowledge Studio to detect custom entities, relations, and categories in -Natural Language Understanding. -""" +models](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) +with Watson Knowledge Studio to detect custom entities and relations in Natural Language +Understanding. 
-from __future__ import absolute_import +API Version: 1.0 +See: https://cloud.ibm.com/docs/natural-language-understanding +""" +from datetime import datetime +from enum import Enum +from typing import BinaryIO, Dict, List, Optional import json + +from ibm_cloud_sdk_core import BaseService, DetailedResponse +from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator +from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment +from ibm_cloud_sdk_core.utils import convert_model, datetime_to_string, string_to_datetime + from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService -from ibm_cloud_sdk_core import datetime_to_string, string_to_datetime ############################################################################## # Service @@ -39,95 +49,61 @@ class NaturalLanguageUnderstandingV1(BaseService): """The Natural Language Understanding V1 service.""" - default_url = 'https://gateway.watsonplatform.net/natural-language-understanding/api' + DEFAULT_SERVICE_URL = 'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com' + DEFAULT_SERVICE_NAME = 'natural-language-understanding' def __init__( - self, - version, - url=default_url, - username=None, - password=None, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): + self, + version: str, + authenticator: Authenticator = None, + service_name: str = DEFAULT_SERVICE_NAME, + ) -> None: """ Construct a new client for the Natural Language Understanding service. - :param str version: The API version date to use with the service, in - "YYYY-MM-DD" format. Whenever the API is changed in a backwards - incompatible way, a new minor version of the API is released. - The service uses the API version for the date you specify, or - the most recent version before that date. Note that you should - not programmatically specify the current date at runtime, in - case the API has been updated since your application's release. - Instead, specify a version date that is compatible with your - application, and don't change it until your application is - ready for a later version. - - :param str url: The base url to use when contacting the service (e.g. - "https://gateway.watsonplatform.net/natural-language-understanding/api/natural-language-understanding/api"). - The base url may differ between IBM Cloud regions. - - :param str username: The username used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str password: The password used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. - - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. 
Defaults to - 'https://iam.cloud.ibm.com/identity/token'. - """ - - BaseService.__init__( - self, - vcap_services_name='natural-language-understanding', - url=url, - username=username, - password=password, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Natural Language Understanding') + :param str version: Release date of the API version you want to use. + Specify dates in YYYY-MM-DD format. The current version is `2022-04-07`. + + :param Authenticator authenticator: The authenticator specifies the authentication mechanism. + Get up to date information from https://github.com/IBM/python-sdk-core/blob/main/README.md + about initializing the authenticator of your choice. + """ + if version is None: + raise ValueError('version must be provided') + + if not authenticator: + authenticator = get_authenticator_from_environment(service_name) + BaseService.__init__(self, + service_url=self.DEFAULT_SERVICE_URL, + authenticator=authenticator) self.version = version + self.configure_service(service_name) ######################### # Analyze ######################### - def analyze(self, - features, - text=None, - html=None, - url=None, - clean=None, - xpath=None, - fallback_to_raw=None, - return_analyzed_text=None, - language=None, - limit_text_characters=None, - **kwargs): + def analyze( + self, + features: 'Features', + *, + text: Optional[str] = None, + html: Optional[str] = None, + url: Optional[str] = None, + clean: Optional[bool] = None, + xpath: Optional[str] = None, + fallback_to_raw: Optional[bool] = None, + return_analyzed_text: Optional[bool] = None, + language: Optional[str] = None, + limit_text_characters: Optional[int] = None, + **kwargs, + ) -> DetailedResponse: """ Analyze text. Analyzes text, HTML, or a public webpage for the following features: - Categories + - Classifications - Concepts - Emotion - Entities @@ -136,51 +112,56 @@ def analyze(self, - Relations - Semantic roles - Sentiment - - Syntax (Experimental). + - Syntax + If a language for the input text is not specified with the `language` parameter, + the service [automatically detects the + language](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-detectable-languages). :param Features features: Specific features to analyze the document for. - :param str text: The plain text to analyze. One of the `text`, `html`, or `url` - parameters is required. - :param str html: The HTML file to analyze. One of the `text`, `html`, or `url` - parameters is required. - :param str url: The webpage to analyze. One of the `text`, `html`, or `url` - parameters is required. - :param bool clean: Set this to `false` to disable webpage cleaning. To learn more - about webpage cleaning, see the [Analyzing - webpages](https://cloud.ibm.com/docs/services/natural-language-understanding/analyzing-webpages.html) - documentation. - :param str xpath: An [XPath - query](https://cloud.ibm.com/docs/services/natural-language-understanding/analyzing-webpages.html#xpath) - to perform on `html` or `url` input. Results of the query will be appended to the - cleaned webpage text before it is analyzed. To analyze only the results of the - XPath query, set the `clean` parameter to `false`. - :param bool fallback_to_raw: Whether to use raw HTML content if text cleaning - fails. - :param bool return_analyzed_text: Whether or not to return the analyzed text. - :param str language: ISO 639-1 code that specifies the language of your text. 
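# ---- Editorial usage sketch (not part of the diff) --------------------------
# A minimal sketch of constructing the rewritten client: the old
# username/password/iam_apikey keyword arguments are replaced by a single
# Authenticator, per the new constructor above. The API key and service URL
# below are placeholders, not real credentials.
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import NaturalLanguageUnderstandingV1

authenticator = IAMAuthenticator('<your-iam-apikey>')   # placeholder credential
nlu = NaturalLanguageUnderstandingV1(
    version='2022-04-07',        # API version date; the docstring names 2022-04-07 as current
    authenticator=authenticator,
)
# Override the default us-south endpoint if your instance lives in another region.
nlu.set_service_url('https://api.us-south.natural-language-understanding.watson.cloud.ibm.com')
# ------------------------------------------------------------------------------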
This - overrides automatic language detection. Language support differs depending on the - features you include in your analysis. See [Language - support](https://cloud.ibm.com/docs/services/natural-language-understanding?topic=natural-language-understanding-language-support) - for more information. - :param int limit_text_characters: Sets the maximum number of characters that are - processed by the service. + :param str text: (optional) The plain text to analyze. One of the `text`, + `html`, or `url` parameters is required. + :param str html: (optional) The HTML file to analyze. One of the `text`, + `html`, or `url` parameters is required. + :param str url: (optional) The webpage to analyze. One of the `text`, + `html`, or `url` parameters is required. + :param bool clean: (optional) Set this to `false` to disable webpage + cleaning. For more information about webpage cleaning, see [Analyzing + webpages](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-analyzing-webpages). + :param str xpath: (optional) An [XPath + query](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-analyzing-webpages#xpath) + to perform on `html` or `url` input. Results of the query will be appended + to the cleaned webpage text before it is analyzed. To analyze only the + results of the XPath query, set the `clean` parameter to `false`. + :param bool fallback_to_raw: (optional) Whether to use raw HTML content if + text cleaning fails. + :param bool return_analyzed_text: (optional) Whether or not to return the + analyzed text. + :param str language: (optional) ISO 639-1 code that specifies the language + of your text. This overrides automatic language detection. Language support + differs depending on the features you include in your analysis. For more + information, see [Language + support](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-language-support). + :param int limit_text_characters: (optional) Sets the maximum number of + characters that are processed by the service. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `AnalysisResults` object """ if features is None: raise ValueError('features must be provided') - features = self._convert_model(features, Features) - + features = convert_model(features) headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('natural-language-understanding', 'V1', - 'analyze') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='analyze', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + } data = { 'features': features, @@ -192,24 +173,82 @@ def analyze(self, 'fallback_to_raw': fallback_to_raw, 'return_analyzed_text': return_analyzed_text, 'language': language, - 'limit_text_characters': limit_text_characters + 'limit_text_characters': limit_text_characters, } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' url = '/v1/analyze' - response = self.request( + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response ######################### # Manage models ######################### - def delete_model(self, model_id, **kwargs): + def list_models( + self, + **kwargs, + ) -> DetailedResponse: + """ + List models. + + Lists Watson Knowledge Studio [custom entities and relations + models](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) + that are deployed to your Natural Language Understanding service. + + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `ListModelsResults` object + """ + + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_models', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/models' + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def delete_model( + self, + model_id: str, + **kwargs, + ) -> DetailedResponse: """ Delete model. @@ -218,559 +257,2256 @@ def delete_model(self, model_id, **kwargs): :param str model_id: Model ID of the model to delete. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
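# ---- Editorial usage sketch (not part of the diff) --------------------------
# Calling analyze() on the `nlu` client built in the previous sketch. Features,
# EntitiesOptions and KeywordsOptions are model classes defined elsewhere in
# this module; the input text is illustrative only.
import json
from ibm_watson.natural_language_understanding_v1 import (
    Features, EntitiesOptions, KeywordsOptions)

response = nlu.analyze(
    text='IBM is an American multinational technology company '
         'headquartered in Armonk, New York.',
    features=Features(
        entities=EntitiesOptions(sentiment=True, limit=3),
        keywords=KeywordsOptions(sentiment=True, limit=3),
    ),
    return_analyzed_text=True,
).get_result()

print(json.dumps(response, indent=2))
# ------------------------------------------------------------------------------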
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `DeleteModelResults` object + """ + + if not model_id: + raise ValueError('model_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_model', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['model_id'] + path_param_values = self.encode_path_vars(model_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/models/{model_id}'.format(**path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Manage categories models + ######################### + + def create_categories_model( + self, + language: str, + training_data: BinaryIO, + *, + training_data_content_type: Optional[str] = None, + name: Optional[str] = None, + user_metadata: Optional[dict] = None, + description: Optional[str] = None, + model_version: Optional[str] = None, + workspace_id: Optional[str] = None, + version_description: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create categories model. + + (Beta) Creates a custom categories model by uploading training data and associated + metadata. The model begins the training and deploying process and is ready to use + when the `status` is `available`. + + :param str language: The 2-letter language code of this model. + :param BinaryIO training_data: Training data in JSON format. For more + information, see [Categories training data + requirements](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-categories##categories-training-data-requirements). + :param str training_data_content_type: (optional) The content type of + training_data. + :param str name: (optional) An optional name for the model. + :param dict user_metadata: (optional) An optional map of metadata key-value + pairs to store with this model. + :param str description: (optional) An optional description of the model. + :param str model_version: (optional) An optional version string. + :param str workspace_id: (optional) ID of the Watson Knowledge Studio + workspace that deployed this model to Natural Language Understanding. + :param str version_description: (optional) The description of the version. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
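# ---- Editorial usage sketch (not part of the diff) --------------------------
# Listing and deleting custom models with the client from the first sketch.
# The model ID below is a hypothetical placeholder.
models = nlu.list_models().get_result()
for model in models.get('models', []):
    print(model['model_id'], model.get('status'))

delete_results = nlu.delete_model(model_id='<custom-model-id>').get_result()
print(delete_results)
# ------------------------------------------------------------------------------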
+ :rtype: DetailedResponse with `dict` result representing a `CategoriesModel` object """ - if model_id is None: + if not language: + raise ValueError('language must be provided') + if training_data is None: + raise ValueError('training_data must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_categories_model', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + form_data = [] + form_data.append(('language', (None, language, 'text/plain'))) + form_data.append(('training_data', + (None, training_data, training_data_content_type or + 'application/octet-stream'))) + if name: + form_data.append(('name', (None, name, 'text/plain'))) + if user_metadata: + form_data.append(('user_metadata', (None, json.dumps(user_metadata), + 'application/json'))) + if description: + form_data.append(('description', (None, description, 'text/plain'))) + if model_version: + form_data.append( + ('model_version', (None, model_version, 'text/plain'))) + if workspace_id: + form_data.append( + ('workspace_id', (None, workspace_id, 'text/plain'))) + if version_description: + form_data.append(('version_description', (None, version_description, + 'text/plain'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/models/categories' + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response + + def list_categories_models( + self, + **kwargs, + ) -> DetailedResponse: + """ + List categories models. + + (Beta) Returns all custom categories models associated with this service instance. + + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `CategoriesModelList` object + """ + + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_categories_models', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/models/categories' + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def get_categories_model( + self, + model_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get categories model details. + + (Beta) Returns the status of the categories model with the given model ID. + + :param str model_id: ID of the model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
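# ---- Editorial usage sketch (not part of the diff) --------------------------
# Creating a (Beta) custom categories model. The training-data file name is a
# placeholder; the file must follow the categories training-data requirements
# linked in the docstring above.
with open('categories_training_data.json', 'rb') as training_data:
    categories_model = nlu.create_categories_model(
        language='en',
        training_data=training_data,
        training_data_content_type='application/json',
        name='my-categories-model',
    ).get_result()

print(categories_model['model_id'], categories_model['status'])
# ------------------------------------------------------------------------------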
+ :rtype: DetailedResponse with `dict` result representing a `CategoriesModel` object + """ + + if not model_id: raise ValueError('model_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_categories_model', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['model_id'] + path_param_values = self.encode_path_vars(model_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/models/categories/{model_id}'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + response = self.send(request, **kwargs) + return response + + def update_categories_model( + self, + model_id: str, + language: str, + training_data: BinaryIO, + *, + training_data_content_type: Optional[str] = None, + name: Optional[str] = None, + user_metadata: Optional[dict] = None, + description: Optional[str] = None, + model_version: Optional[str] = None, + workspace_id: Optional[str] = None, + version_description: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update categories model. + + (Beta) Overwrites the training data associated with this custom categories model + and retrains the model. The new model replaces the current deployment. + + :param str model_id: ID of the model. + :param str language: The 2-letter language code of this model. + :param BinaryIO training_data: Training data in JSON format. For more + information, see [Categories training data + requirements](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-categories##categories-training-data-requirements). + :param str training_data_content_type: (optional) The content type of + training_data. + :param str name: (optional) An optional name for the model. + :param dict user_metadata: (optional) An optional map of metadata key-value + pairs to store with this model. + :param str description: (optional) An optional description of the model. + :param str model_version: (optional) An optional version string. + :param str workspace_id: (optional) ID of the Watson Knowledge Studio + workspace that deployed this model to Natural Language Understanding. + :param str version_description: (optional) The description of the version. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
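# ---- Editorial usage sketch (not part of the diff) --------------------------
# Polling a (Beta) categories model until training finishes; per the docstring,
# the model is ready when `status` is 'available'. `categories_model` comes
# from the create_categories_model sketch above; the polling interval is arbitrary.
import time

model_id = categories_model['model_id']
while True:
    details = nlu.get_categories_model(model_id=model_id).get_result()
    if details['status'] in ('available', 'error'):
        break
    time.sleep(30)
print(details['status'])
# ------------------------------------------------------------------------------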
+ :rtype: DetailedResponse with `dict` result representing a `CategoriesModel` object + """ + + if not model_id: + raise ValueError('model_id must be provided') + if not language: + raise ValueError('language must be provided') + if training_data is None: + raise ValueError('training_data must be provided') headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_categories_model', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + form_data = [] + form_data.append(('language', (None, language, 'text/plain'))) + form_data.append(('training_data', + (None, training_data, training_data_content_type or + 'application/octet-stream'))) + if name: + form_data.append(('name', (None, name, 'text/plain'))) + if user_metadata: + form_data.append(('user_metadata', (None, json.dumps(user_metadata), + 'application/json'))) + if description: + form_data.append(('description', (None, description, 'text/plain'))) + if model_version: + form_data.append( + ('model_version', (None, model_version, 'text/plain'))) + if workspace_id: + form_data.append( + ('workspace_id', (None, workspace_id, 'text/plain'))) + if version_description: + form_data.append(('version_description', (None, version_description, + 'text/plain'))) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('natural-language-understanding', 'V1', - 'delete_model') + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['model_id'] + path_param_values = self.encode_path_vars(model_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/models/categories/{model_id}'.format(**path_param_dict) + request = self.prepare_request( + method='PUT', + url=url, + headers=headers, + params=params, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response + + def delete_categories_model( + self, + model_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete categories model. + + (Beta) Un-deploys the custom categories model with the given model ID and deletes + all associated customer data, including any training data or binary artifacts. + + :param str model_id: ID of the model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `DeleteModelResults` object + """ + + if not model_id: + raise ValueError('model_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_categories_model', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + } - url = '/v1/models/{0}'.format(*self._encode_path_vars(model_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['model_id'] + path_param_values = self.encode_path_vars(model_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/models/categories/{model_id}'.format(**path_param_dict) + request = self.prepare_request( method='DELETE', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def list_models(self, **kwargs): + ######################### + # Manage classifications models + ######################### + + def create_classifications_model( + self, + language: str, + training_data: BinaryIO, + *, + training_data_content_type: Optional[str] = None, + name: Optional[str] = None, + user_metadata: Optional[dict] = None, + description: Optional[str] = None, + model_version: Optional[str] = None, + workspace_id: Optional[str] = None, + version_description: Optional[str] = None, + training_parameters: Optional[ + 'ClassificationsTrainingParameters'] = None, + **kwargs, + ) -> DetailedResponse: + """ + Create classifications model. + + Creates a custom classifications model by uploading training data and associated + metadata. The model begins the training and deploying process and is ready to use + when the `status` is `available`. + + :param str language: The 2-letter language code of this model. + :param BinaryIO training_data: Training data in JSON format. For more + information, see [Classifications training data + requirements](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-classifications#classification-training-data-requirements). + :param str training_data_content_type: (optional) The content type of + training_data. + :param str name: (optional) An optional name for the model. + :param dict user_metadata: (optional) An optional map of metadata key-value + pairs to store with this model. + :param str description: (optional) An optional description of the model. + :param str model_version: (optional) An optional version string. + :param str workspace_id: (optional) ID of the Watson Knowledge Studio + workspace that deployed this model to Natural Language Understanding. + :param str version_description: (optional) The description of the version. + :param ClassificationsTrainingParameters training_parameters: (optional) + Optional classifications training parameters along with model train + requests. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `ClassificationsModel` object """ - List models. - Lists Watson Knowledge Studio [custom - models](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html) - that are deployed to your Natural Language Understanding service. 
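# ---- Editorial usage sketch (not part of the diff) --------------------------
# Retraining and then removing a (Beta) categories model. The file name and
# model_id are placeholders carried over from the earlier sketches.
with open('categories_training_data_v2.json', 'rb') as training_data:
    nlu.update_categories_model(
        model_id=model_id,
        language='en',
        training_data=training_data,
        training_data_content_type='application/json',
        model_version='1.0.1',
    ).get_result()

# Un-deploy the model and delete its training data once it is no longer needed.
nlu.delete_categories_model(model_id=model_id)
# ------------------------------------------------------------------------------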
+ if not language: + raise ValueError('language must be provided') + if training_data is None: + raise ValueError('training_data must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_classifications_model', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + form_data = [] + form_data.append(('language', (None, language, 'text/plain'))) + form_data.append(('training_data', + (None, training_data, training_data_content_type or + 'application/octet-stream'))) + if name: + form_data.append(('name', (None, name, 'text/plain'))) + if user_metadata: + form_data.append(('user_metadata', (None, json.dumps(user_metadata), + 'application/json'))) + if description: + form_data.append(('description', (None, description, 'text/plain'))) + if model_version: + form_data.append( + ('model_version', (None, model_version, 'text/plain'))) + if workspace_id: + form_data.append( + ('workspace_id', (None, workspace_id, 'text/plain'))) + if version_description: + form_data.append(('version_description', (None, version_description, + 'text/plain'))) + if training_parameters: + form_data.append( + ('training_parameters', (None, json.dumps(training_parameters), + 'application/json'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/models/classifications' + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response + + def list_classifications_models( + self, + **kwargs, + ) -> DetailedResponse: + """ + List classifications models. + + Returns all custom classifications models associated with this service instance. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `ClassificationsModelList` object """ headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_classifications_models', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('natural-language-understanding', 'V1', - 'list_models') + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/models/classifications' + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + def get_classifications_model( + self, + model_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get classifications model details. + + Returns the status of the classifications model with the given model ID. + + :param str model_id: ID of the model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
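# ---- Editorial usage sketch (not part of the diff) --------------------------
# Creating a custom classifications model. The optional training_parameters
# argument (a ClassificationsTrainingParameters model defined elsewhere in this
# module) is omitted here; the training file name is a placeholder.
with open('classifications_training_data.json', 'rb') as training_data:
    classifications_model = nlu.create_classifications_model(
        language='en',
        training_data=training_data,
        training_data_content_type='application/json',
        name='my-classifications-model',
    ).get_result()

print(classifications_model['model_id'], classifications_model['status'])
# ------------------------------------------------------------------------------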
+ :rtype: DetailedResponse with `dict` result representing a `ClassificationsModel` object + """ + + if not model_id: + raise ValueError('model_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_classifications_model', + ) headers.update(sdk_headers) - params = {'version': self.version} + params = { + 'version': self.version, + } - url = '/v1/models' - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['model_id'] + path_param_values = self.encode_path_vars(model_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/models/classifications/{model_id}'.format(**path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response + def update_classifications_model( + self, + model_id: str, + language: str, + training_data: BinaryIO, + *, + training_data_content_type: Optional[str] = None, + name: Optional[str] = None, + user_metadata: Optional[dict] = None, + description: Optional[str] = None, + model_version: Optional[str] = None, + workspace_id: Optional[str] = None, + version_description: Optional[str] = None, + training_parameters: Optional[ + 'ClassificationsTrainingParameters'] = None, + **kwargs, + ) -> DetailedResponse: + """ + Update classifications model. + + Overwrites the training data associated with this custom classifications model and + retrains the model. The new model replaces the current deployment. + + :param str model_id: ID of the model. + :param str language: The 2-letter language code of this model. + :param BinaryIO training_data: Training data in JSON format. For more + information, see [Classifications training data + requirements](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-classifications#classification-training-data-requirements). + :param str training_data_content_type: (optional) The content type of + training_data. + :param str name: (optional) An optional name for the model. + :param dict user_metadata: (optional) An optional map of metadata key-value + pairs to store with this model. + :param str description: (optional) An optional description of the model. + :param str model_version: (optional) An optional version string. + :param str workspace_id: (optional) ID of the Watson Knowledge Studio + workspace that deployed this model to Natural Language Understanding. + :param str version_description: (optional) The description of the version. + :param ClassificationsTrainingParameters training_parameters: (optional) + Optional classifications training parameters along with model train + requests. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `ClassificationsModel` object + """ + + if not model_id: + raise ValueError('model_id must be provided') + if not language: + raise ValueError('language must be provided') + if training_data is None: + raise ValueError('training_data must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_classifications_model', + ) + headers.update(sdk_headers) -############################################################################## -# Models -############################################################################## + params = { + 'version': self.version, + } + form_data = [] + form_data.append(('language', (None, language, 'text/plain'))) + form_data.append(('training_data', + (None, training_data, training_data_content_type or + 'application/octet-stream'))) + if name: + form_data.append(('name', (None, name, 'text/plain'))) + if user_metadata: + form_data.append(('user_metadata', (None, json.dumps(user_metadata), + 'application/json'))) + if description: + form_data.append(('description', (None, description, 'text/plain'))) + if model_version: + form_data.append( + ('model_version', (None, model_version, 'text/plain'))) + if workspace_id: + form_data.append( + ('workspace_id', (None, workspace_id, 'text/plain'))) + if version_description: + form_data.append(('version_description', (None, version_description, + 'text/plain'))) + if training_parameters: + form_data.append( + ('training_parameters', (None, json.dumps(training_parameters), + 'application/json'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['model_id'] + path_param_values = self.encode_path_vars(model_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/models/classifications/{model_id}'.format(**path_param_dict) + request = self.prepare_request( + method='PUT', + url=url, + headers=headers, + params=params, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response -class AnalysisResults(object): - """ - Analysis results for each requested feature. - - :attr str language: (optional) Language used to analyze the text. - :attr str analyzed_text: (optional) Text that was used in the analysis. - :attr str retrieved_url: (optional) URL of the webpage that was analyzed. - :attr AnalysisResultsUsage usage: (optional) API usage information for the request. - :attr list[ConceptsResult] concepts: (optional) The general concepts referenced or - alluded to in the analyzed text. - :attr list[EntitiesResult] entities: (optional) The entities detected in the analyzed - text. - :attr list[KeywordsResult] keywords: (optional) The keywords from the analyzed text. - :attr list[CategoriesResult] categories: (optional) The categories that the service - assigned to the analyzed text. - :attr EmotionResult emotion: (optional) The anger, disgust, fear, joy, or sadness - conveyed by the content. - :attr AnalysisResultsMetadata metadata: (optional) Webpage metadata, such as the - author and the title of the page. - :attr list[RelationsResult] relations: (optional) The relationships between entities - in the content. - :attr list[SemanticRolesResult] semantic_roles: (optional) Sentences parsed into - `subject`, `action`, and `object` form. - :attr SentimentResult sentiment: (optional) The sentiment of the content. 
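# ---- Editorial usage sketch (not part of the diff) --------------------------
# Checking and retraining a custom classifications model. `classifications_model`
# is assumed to come from the create_classifications_model sketch above; the
# training file name is a placeholder.
model_id = classifications_model['model_id']
details = nlu.get_classifications_model(model_id=model_id).get_result()
print(details['status'])                      # e.g. 'training' or 'available'

with open('classifications_training_data_v2.json', 'rb') as training_data:
    nlu.update_classifications_model(
        model_id=model_id,
        language='en',
        training_data=training_data,
        training_data_content_type='application/json',
    ).get_result()
# ------------------------------------------------------------------------------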
- :attr SyntaxResult syntax: (optional) Tokens and sentences returned from syntax - analysis. - """ - - def __init__(self, - language=None, - analyzed_text=None, - retrieved_url=None, - usage=None, - concepts=None, - entities=None, - keywords=None, - categories=None, - emotion=None, - metadata=None, - relations=None, - semantic_roles=None, - sentiment=None, - syntax=None): + def delete_classifications_model( + self, + model_id: str, + **kwargs, + ) -> DetailedResponse: """ - Initialize a AnalysisResults object. + Delete classifications model. - :param str language: (optional) Language used to analyze the text. - :param str analyzed_text: (optional) Text that was used in the analysis. - :param str retrieved_url: (optional) URL of the webpage that was analyzed. - :param AnalysisResultsUsage usage: (optional) API usage information for the - request. - :param list[ConceptsResult] concepts: (optional) The general concepts referenced - or alluded to in the analyzed text. - :param list[EntitiesResult] entities: (optional) The entities detected in the - analyzed text. - :param list[KeywordsResult] keywords: (optional) The keywords from the analyzed - text. - :param list[CategoriesResult] categories: (optional) The categories that the - service assigned to the analyzed text. - :param EmotionResult emotion: (optional) The anger, disgust, fear, joy, or sadness - conveyed by the content. - :param AnalysisResultsMetadata metadata: (optional) Webpage metadata, such as the - author and the title of the page. - :param list[RelationsResult] relations: (optional) The relationships between - entities in the content. - :param list[SemanticRolesResult] semantic_roles: (optional) Sentences parsed into - `subject`, `action`, and `object` form. - :param SentimentResult sentiment: (optional) The sentiment of the content. - :param SyntaxResult syntax: (optional) Tokens and sentences returned from syntax - analysis. + Un-deploys the custom classifications model with the given model ID and deletes + all associated customer data, including any training data or binary artifacts. + + :param str model_id: ID of the model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `DeleteModelResults` object """ - self.language = language - self.analyzed_text = analyzed_text - self.retrieved_url = retrieved_url - self.usage = usage - self.concepts = concepts - self.entities = entities - self.keywords = keywords - self.categories = categories - self.emotion = emotion - self.metadata = metadata - self.relations = relations - self.semantic_roles = semantic_roles - self.sentiment = sentiment - self.syntax = syntax - @classmethod - def _from_dict(cls, _dict): - """Initialize a AnalysisResults object from a json dictionary.""" - args = {} - if 'language' in _dict: - args['language'] = _dict.get('language') - if 'analyzed_text' in _dict: - args['analyzed_text'] = _dict.get('analyzed_text') - if 'retrieved_url' in _dict: - args['retrieved_url'] = _dict.get('retrieved_url') - if 'usage' in _dict: - args['usage'] = AnalysisResultsUsage._from_dict(_dict.get('usage')) - if 'concepts' in _dict: - args['concepts'] = [ - ConceptsResult._from_dict(x) for x in (_dict.get('concepts')) - ] - if 'entities' in _dict: - args['entities'] = [ - EntitiesResult._from_dict(x) for x in (_dict.get('entities')) - ] - if 'keywords' in _dict: - args['keywords'] = [ - KeywordsResult._from_dict(x) for x in (_dict.get('keywords')) - ] - if 'categories' in _dict: + if not model_id: + raise ValueError('model_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_classifications_model', + ) + headers.update(sdk_headers) + + params = { + 'version': self.version, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['model_id'] + path_param_values = self.encode_path_vars(model_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/models/classifications/{model_id}'.format(**path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + +class CreateCategoriesModelEnums: + """ + Enums for create_categories_model parameters. + """ + + class TrainingDataContentType(str, Enum): + """ + The content type of training_data. + """ + + JSON = 'json' + APPLICATION_JSON = 'application/json' + + +class UpdateCategoriesModelEnums: + """ + Enums for update_categories_model parameters. + """ + + class TrainingDataContentType(str, Enum): + """ + The content type of training_data. + """ + + JSON = 'json' + APPLICATION_JSON = 'application/json' + + +class CreateClassificationsModelEnums: + """ + Enums for create_classifications_model parameters. + """ + + class TrainingDataContentType(str, Enum): + """ + The content type of training_data. + """ + + JSON = 'json' + APPLICATION_JSON = 'application/json' + + +class UpdateClassificationsModelEnums: + """ + Enums for update_classifications_model parameters. + """ + + class TrainingDataContentType(str, Enum): + """ + The content type of training_data. + """ + + JSON = 'json' + APPLICATION_JSON = 'application/json' + + +############################################################################## +# Models +############################################################################## + + +class AnalysisResults: + """ + Results of the analysis, organized by feature. + + :param str language: (optional) Language used to analyze the text. 
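# ---- Editorial usage sketch (not part of the diff) --------------------------
# The *Enums classes above provide typed values for training_data_content_type,
# and delete_classifications_model() un-deploys a model and deletes its data.
# `model_id` is carried over from the earlier classifications sketches.
from ibm_watson.natural_language_understanding_v1 import CreateClassificationsModelEnums

content_type = CreateClassificationsModelEnums.TrainingDataContentType.APPLICATION_JSON
# content_type is a str subclass whose value is 'application/json', so it can be
# passed directly as training_data_content_type in create/update calls.

nlu.delete_classifications_model(model_id=model_id)
# ------------------------------------------------------------------------------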
+ :param str analyzed_text: (optional) Text that was used in the analysis. + :param str retrieved_url: (optional) URL of the webpage that was analyzed. + :param AnalysisResultsUsage usage: (optional) API usage information for the + request. + :param List[ConceptsResult] concepts: (optional) The general concepts referenced + or alluded to in the analyzed text. + :param List[EntitiesResult] entities: (optional) The entities detected in the + analyzed text. + :param List[KeywordsResult] keywords: (optional) The keywords from the analyzed + text. + :param List[CategoriesResult] categories: (optional) The categories that the + service assigned to the analyzed text. + :param List[ClassificationsResult] classifications: (optional) The + classifications assigned to the analyzed text. + :param EmotionResult emotion: (optional) The anger, disgust, fear, joy, or + sadness conveyed by the content. + :param FeaturesResultsMetadata metadata: (optional) Webpage metadata, such as + the author and the title of the page. + :param List[RelationsResult] relations: (optional) The relationships between + entities in the content. + :param List[SemanticRolesResult] semantic_roles: (optional) Sentences parsed + into `subject`, `action`, and `object` form. + :param SentimentResult sentiment: (optional) The sentiment of the content. + :param SyntaxResult syntax: (optional) Tokens and sentences returned from syntax + analysis. + """ + + def __init__( + self, + *, + language: Optional[str] = None, + analyzed_text: Optional[str] = None, + retrieved_url: Optional[str] = None, + usage: Optional['AnalysisResultsUsage'] = None, + concepts: Optional[List['ConceptsResult']] = None, + entities: Optional[List['EntitiesResult']] = None, + keywords: Optional[List['KeywordsResult']] = None, + categories: Optional[List['CategoriesResult']] = None, + classifications: Optional[List['ClassificationsResult']] = None, + emotion: Optional['EmotionResult'] = None, + metadata: Optional['FeaturesResultsMetadata'] = None, + relations: Optional[List['RelationsResult']] = None, + semantic_roles: Optional[List['SemanticRolesResult']] = None, + sentiment: Optional['SentimentResult'] = None, + syntax: Optional['SyntaxResult'] = None, + ) -> None: + """ + Initialize a AnalysisResults object. + + :param str language: (optional) Language used to analyze the text. + :param str analyzed_text: (optional) Text that was used in the analysis. + :param str retrieved_url: (optional) URL of the webpage that was analyzed. + :param AnalysisResultsUsage usage: (optional) API usage information for the + request. + :param List[ConceptsResult] concepts: (optional) The general concepts + referenced or alluded to in the analyzed text. + :param List[EntitiesResult] entities: (optional) The entities detected in + the analyzed text. + :param List[KeywordsResult] keywords: (optional) The keywords from the + analyzed text. + :param List[CategoriesResult] categories: (optional) The categories that + the service assigned to the analyzed text. + :param List[ClassificationsResult] classifications: (optional) The + classifications assigned to the analyzed text. + :param EmotionResult emotion: (optional) The anger, disgust, fear, joy, or + sadness conveyed by the content. + :param FeaturesResultsMetadata metadata: (optional) Webpage metadata, such + as the author and the title of the page. + :param List[RelationsResult] relations: (optional) The relationships + between entities in the content. 
+ :param List[SemanticRolesResult] semantic_roles: (optional) Sentences + parsed into `subject`, `action`, and `object` form. + :param SentimentResult sentiment: (optional) The sentiment of the content. + :param SyntaxResult syntax: (optional) Tokens and sentences returned from + syntax analysis. + """ + self.language = language + self.analyzed_text = analyzed_text + self.retrieved_url = retrieved_url + self.usage = usage + self.concepts = concepts + self.entities = entities + self.keywords = keywords + self.categories = categories + self.classifications = classifications + self.emotion = emotion + self.metadata = metadata + self.relations = relations + self.semantic_roles = semantic_roles + self.sentiment = sentiment + self.syntax = syntax + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AnalysisResults': + """Initialize a AnalysisResults object from a json dictionary.""" + args = {} + if (language := _dict.get('language')) is not None: + args['language'] = language + if (analyzed_text := _dict.get('analyzed_text')) is not None: + args['analyzed_text'] = analyzed_text + if (retrieved_url := _dict.get('retrieved_url')) is not None: + args['retrieved_url'] = retrieved_url + if (usage := _dict.get('usage')) is not None: + args['usage'] = AnalysisResultsUsage.from_dict(usage) + if (concepts := _dict.get('concepts')) is not None: + args['concepts'] = [ConceptsResult.from_dict(v) for v in concepts] + if (entities := _dict.get('entities')) is not None: + args['entities'] = [EntitiesResult.from_dict(v) for v in entities] + if (keywords := _dict.get('keywords')) is not None: + args['keywords'] = [KeywordsResult.from_dict(v) for v in keywords] + if (categories := _dict.get('categories')) is not None: args['categories'] = [ - CategoriesResult._from_dict(x) - for x in (_dict.get('categories')) + CategoriesResult.from_dict(v) for v in categories + ] + if (classifications := _dict.get('classifications')) is not None: + args['classifications'] = [ + ClassificationsResult.from_dict(v) for v in classifications ] - if 'emotion' in _dict: - args['emotion'] = EmotionResult._from_dict(_dict.get('emotion')) - if 'metadata' in _dict: - args['metadata'] = AnalysisResultsMetadata._from_dict( - _dict.get('metadata')) - if 'relations' in _dict: + if (emotion := _dict.get('emotion')) is not None: + args['emotion'] = EmotionResult.from_dict(emotion) + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = FeaturesResultsMetadata.from_dict(metadata) + if (relations := _dict.get('relations')) is not None: args['relations'] = [ - RelationsResult._from_dict(x) for x in (_dict.get('relations')) + RelationsResult.from_dict(v) for v in relations ] - if 'semantic_roles' in _dict: + if (semantic_roles := _dict.get('semantic_roles')) is not None: args['semantic_roles'] = [ - SemanticRolesResult._from_dict(x) - for x in (_dict.get('semantic_roles')) + SemanticRolesResult.from_dict(v) for v in semantic_roles ] - if 'sentiment' in _dict: - args['sentiment'] = SentimentResult._from_dict( - _dict.get('sentiment')) - if 'syntax' in _dict: - args['syntax'] = SyntaxResult._from_dict(_dict.get('syntax')) + if (sentiment := _dict.get('sentiment')) is not None: + args['sentiment'] = SentimentResult.from_dict(sentiment) + if (syntax := _dict.get('syntax')) is not None: + args['syntax'] = SyntaxResult.from_dict(syntax) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AnalysisResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + 
"""Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'analyzed_text') and self.analyzed_text is not None: + _dict['analyzed_text'] = self.analyzed_text + if hasattr(self, 'retrieved_url') and self.retrieved_url is not None: + _dict['retrieved_url'] = self.retrieved_url + if hasattr(self, 'usage') and self.usage is not None: + if isinstance(self.usage, dict): + _dict['usage'] = self.usage + else: + _dict['usage'] = self.usage.to_dict() + if hasattr(self, 'concepts') and self.concepts is not None: + concepts_list = [] + for v in self.concepts: + if isinstance(v, dict): + concepts_list.append(v) + else: + concepts_list.append(v.to_dict()) + _dict['concepts'] = concepts_list + if hasattr(self, 'entities') and self.entities is not None: + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list + if hasattr(self, 'keywords') and self.keywords is not None: + keywords_list = [] + for v in self.keywords: + if isinstance(v, dict): + keywords_list.append(v) + else: + keywords_list.append(v.to_dict()) + _dict['keywords'] = keywords_list + if hasattr(self, 'categories') and self.categories is not None: + categories_list = [] + for v in self.categories: + if isinstance(v, dict): + categories_list.append(v) + else: + categories_list.append(v.to_dict()) + _dict['categories'] = categories_list + if hasattr(self, + 'classifications') and self.classifications is not None: + classifications_list = [] + for v in self.classifications: + if isinstance(v, dict): + classifications_list.append(v) + else: + classifications_list.append(v.to_dict()) + _dict['classifications'] = classifications_list + if hasattr(self, 'emotion') and self.emotion is not None: + if isinstance(self.emotion, dict): + _dict['emotion'] = self.emotion + else: + _dict['emotion'] = self.emotion.to_dict() + if hasattr(self, 'metadata') and self.metadata is not None: + if isinstance(self.metadata, dict): + _dict['metadata'] = self.metadata + else: + _dict['metadata'] = self.metadata.to_dict() + if hasattr(self, 'relations') and self.relations is not None: + relations_list = [] + for v in self.relations: + if isinstance(v, dict): + relations_list.append(v) + else: + relations_list.append(v.to_dict()) + _dict['relations'] = relations_list + if hasattr(self, 'semantic_roles') and self.semantic_roles is not None: + semantic_roles_list = [] + for v in self.semantic_roles: + if isinstance(v, dict): + semantic_roles_list.append(v) + else: + semantic_roles_list.append(v.to_dict()) + _dict['semantic_roles'] = semantic_roles_list + if hasattr(self, 'sentiment') and self.sentiment is not None: + if isinstance(self.sentiment, dict): + _dict['sentiment'] = self.sentiment + else: + _dict['sentiment'] = self.sentiment.to_dict() + if hasattr(self, 'syntax') and self.syntax is not None: + if isinstance(self.syntax, dict): + _dict['syntax'] = self.syntax + else: + _dict['syntax'] = self.syntax.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AnalysisResults object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AnalysisResults') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, 
self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AnalysisResults') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AnalysisResultsUsage: + """ + API usage information for the request. + + :param int features: (optional) Number of features used in the API call. + :param int text_characters: (optional) Number of text characters processed. + :param int text_units: (optional) Number of 10,000-character units processed. + """ + + def __init__( + self, + *, + features: Optional[int] = None, + text_characters: Optional[int] = None, + text_units: Optional[int] = None, + ) -> None: + """ + Initialize a AnalysisResultsUsage object. + + :param int features: (optional) Number of features used in the API call. + :param int text_characters: (optional) Number of text characters processed. + :param int text_units: (optional) Number of 10,000-character units + processed. + """ + self.features = features + self.text_characters = text_characters + self.text_units = text_units + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AnalysisResultsUsage': + """Initialize a AnalysisResultsUsage object from a json dictionary.""" + args = {} + if (features := _dict.get('features')) is not None: + args['features'] = features + if (text_characters := _dict.get('text_characters')) is not None: + args['text_characters'] = text_characters + if (text_units := _dict.get('text_units')) is not None: + args['text_units'] = text_units + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AnalysisResultsUsage object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'features') and self.features is not None: + _dict['features'] = self.features + if hasattr(self, + 'text_characters') and self.text_characters is not None: + _dict['text_characters'] = self.text_characters + if hasattr(self, 'text_units') and self.text_units is not None: + _dict['text_units'] = self.text_units + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AnalysisResultsUsage object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AnalysisResultsUsage') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AnalysisResultsUsage') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Author: + """ + The author of the analyzed content. + + :param str name: (optional) Name of the author. + """ + + def __init__( + self, + *, + name: Optional[str] = None, + ) -> None: + """ + Initialize a Author object. + + :param str name: (optional) Name of the author. 
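# ---- Editorial usage sketch (not part of the diff) --------------------------
# The generated model classes round-trip between plain dicts and typed objects.
# The `response` dict is assumed to come from the analyze() sketch earlier.
from ibm_watson.natural_language_understanding_v1 import AnalysisResults

results = AnalysisResults.from_dict(response)
print(results.language, results.analyzed_text)
if results.usage is not None:
    print(results.usage.text_characters, 'characters processed')
round_tripped = results.to_dict()             # back to a plain JSON-ready dict
# ------------------------------------------------------------------------------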
+ """ + self.name = name + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Author': + """Initialize a Author object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Author object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Author object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Author') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Author') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CategoriesModel: + """ + Categories model. + + :param str name: (optional) An optional name for the model. + :param dict user_metadata: (optional) An optional map of metadata key-value + pairs to store with this model. + :param str language: The 2-letter language code of this model. + :param str description: (optional) An optional description of the model. + :param str model_version: (optional) An optional version string. + :param str workspace_id: (optional) ID of the Watson Knowledge Studio workspace + that deployed this model to Natural Language Understanding. + :param str version_description: (optional) The description of the version. + :param List[str] features: (optional) The service features that are supported by + the custom model. + :param str status: When the status is `available`, the model is ready to use. + :param str model_id: Unique model ID. + :param datetime created: dateTime indicating when the model was created. + :param List[Notice] notices: (optional) + :param datetime last_trained: (optional) dateTime of last successful model + training. + :param datetime last_deployed: (optional) dateTime of last successful model + deployment. + """ + + def __init__( + self, + language: str, + status: str, + model_id: str, + created: datetime, + *, + name: Optional[str] = None, + user_metadata: Optional[dict] = None, + description: Optional[str] = None, + model_version: Optional[str] = None, + workspace_id: Optional[str] = None, + version_description: Optional[str] = None, + features: Optional[List[str]] = None, + notices: Optional[List['Notice']] = None, + last_trained: Optional[datetime] = None, + last_deployed: Optional[datetime] = None, + ) -> None: + """ + Initialize a CategoriesModel object. + + :param str language: The 2-letter language code of this model. + :param str status: When the status is `available`, the model is ready to + use. + :param str model_id: Unique model ID. + :param datetime created: dateTime indicating when the model was created. + :param str name: (optional) An optional name for the model. + :param dict user_metadata: (optional) An optional map of metadata key-value + pairs to store with this model. + :param str description: (optional) An optional description of the model. + :param str model_version: (optional) An optional version string. 
+ :param str workspace_id: (optional) ID of the Watson Knowledge Studio + workspace that deployed this model to Natural Language Understanding. + :param str version_description: (optional) The description of the version. + :param List[str] features: (optional) The service features that are + supported by the custom model. + :param List[Notice] notices: (optional) + :param datetime last_trained: (optional) dateTime of last successful model + training. + :param datetime last_deployed: (optional) dateTime of last successful model + deployment. + """ + self.name = name + self.user_metadata = user_metadata + self.language = language + self.description = description + self.model_version = model_version + self.workspace_id = workspace_id + self.version_description = version_description + self.features = features + self.status = status + self.model_id = model_id + self.created = created + self.notices = notices + self.last_trained = last_trained + self.last_deployed = last_deployed + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CategoriesModel': + """Initialize a CategoriesModel object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + if (user_metadata := _dict.get('user_metadata')) is not None: + args['user_metadata'] = user_metadata + if (language := _dict.get('language')) is not None: + args['language'] = language + else: + raise ValueError( + 'Required property \'language\' not present in CategoriesModel JSON' + ) + if (description := _dict.get('description')) is not None: + args['description'] = description + if (model_version := _dict.get('model_version')) is not None: + args['model_version'] = model_version + if (workspace_id := _dict.get('workspace_id')) is not None: + args['workspace_id'] = workspace_id + if (version_description := + _dict.get('version_description')) is not None: + args['version_description'] = version_description + if (features := _dict.get('features')) is not None: + args['features'] = features + if (status := _dict.get('status')) is not None: + args['status'] = status + else: + raise ValueError( + 'Required property \'status\' not present in CategoriesModel JSON' + ) + if (model_id := _dict.get('model_id')) is not None: + args['model_id'] = model_id + else: + raise ValueError( + 'Required property \'model_id\' not present in CategoriesModel JSON' + ) + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + else: + raise ValueError( + 'Required property \'created\' not present in CategoriesModel JSON' + ) + if (notices := _dict.get('notices')) is not None: + args['notices'] = [Notice.from_dict(v) for v in notices] + if (last_trained := _dict.get('last_trained')) is not None: + args['last_trained'] = string_to_datetime(last_trained) + if (last_deployed := _dict.get('last_deployed')) is not None: + args['last_deployed'] = string_to_datetime(last_deployed) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CategoriesModel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'user_metadata') and self.user_metadata is not None: + _dict['user_metadata'] = self.user_metadata + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'description') and 
self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'model_version') and self.model_version is not None: + _dict['model_version'] = self.model_version + if hasattr(self, 'workspace_id') and self.workspace_id is not None: + _dict['workspace_id'] = self.workspace_id + if hasattr( + self, + 'version_description') and self.version_description is not None: + _dict['version_description'] = self.version_description + if hasattr(self, 'features') and self.features is not None: + _dict['features'] = self.features + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'model_id') and self.model_id is not None: + _dict['model_id'] = self.model_id + if hasattr(self, 'created') and self.created is not None: + _dict['created'] = datetime_to_string(self.created) + if hasattr(self, 'notices') and self.notices is not None: + notices_list = [] + for v in self.notices: + if isinstance(v, dict): + notices_list.append(v) + else: + notices_list.append(v.to_dict()) + _dict['notices'] = notices_list + if hasattr(self, 'last_trained') and self.last_trained is not None: + _dict['last_trained'] = datetime_to_string(self.last_trained) + if hasattr(self, 'last_deployed') and self.last_deployed is not None: + _dict['last_deployed'] = datetime_to_string(self.last_deployed) + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CategoriesModel object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CategoriesModel') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CategoriesModel') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + When the status is `available`, the model is ready to use. + """ + + STARTING = 'starting' + TRAINING = 'training' + DEPLOYING = 'deploying' + AVAILABLE = 'available' + ERROR = 'error' + DELETED = 'deleted' + + +class CategoriesModelList: + """ + List of categories models. + + :param List[CategoriesModel] models: (optional) The categories models. + """ + + def __init__( + self, + *, + models: Optional[List['CategoriesModel']] = None, + ) -> None: + """ + Initialize a CategoriesModelList object. + + :param List[CategoriesModel] models: (optional) The categories models. 
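# A hedged sketch of deserializing one entry from a model listing; the payload
# is illustrative (the model_id is a made-up placeholder), and the import path
# is assumed to be this module's public path. from_dict() enforces the required
# language/status/model_id/created fields and parses the dateTime string.
from ibm_watson.natural_language_understanding_v1 import CategoriesModel

model = CategoriesModel.from_dict({
    'name': 'news-categories',
    'language': 'en',
    'status': 'training',
    'model_id': 'aa11bb22-example-categories-model',  # hypothetical ID
    'created': '2021-03-01T12:00:00.000Z',
})

# StatusEnum subclasses str, so its members compare equal to the raw value.
if model.status == CategoriesModel.StatusEnum.AVAILABLE:
    print('model is ready to use')
else:
    print('model status: ' + model.status)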
+ """ + self.models = models + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CategoriesModelList': + """Initialize a CategoriesModelList object from a json dictionary.""" + args = {} + if (models := _dict.get('models')) is not None: + args['models'] = [CategoriesModel.from_dict(v) for v in models] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CategoriesModelList object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'models') and self.models is not None: + models_list = [] + for v in self.models: + if isinstance(v, dict): + models_list.append(v) + else: + models_list.append(v.to_dict()) + _dict['models'] = models_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CategoriesModelList object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CategoriesModelList') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CategoriesModelList') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CategoriesOptions: + """ + Returns a hierarchical taxonomy of the content. The top three categories are returned + by default. + Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, + Portuguese, Spanish. + + :param bool explanation: (optional) Set this to `true` to return explanations + for each categorization. **This is available only for English categories.**. + :param int limit: (optional) Maximum number of categories to return. + :param str model: (optional) (Beta) Enter a [custom + model](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) + ID to override the standard categories model. **This is available only for + English categories.**. + """ + + def __init__( + self, + *, + explanation: Optional[bool] = None, + limit: Optional[int] = None, + model: Optional[str] = None, + ) -> None: + """ + Initialize a CategoriesOptions object. + + :param bool explanation: (optional) Set this to `true` to return + explanations for each categorization. **This is available only for English + categories.**. + :param int limit: (optional) Maximum number of categories to return. + :param str model: (optional) (Beta) Enter a [custom + model](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) + ID to override the standard categories model. **This is available only for + English categories.**. 
+ """ + self.explanation = explanation + self.limit = limit + self.model = model + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CategoriesOptions': + """Initialize a CategoriesOptions object from a json dictionary.""" + args = {} + if (explanation := _dict.get('explanation')) is not None: + args['explanation'] = explanation + if (limit := _dict.get('limit')) is not None: + args['limit'] = limit + if (model := _dict.get('model')) is not None: + args['model'] = model + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CategoriesOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'explanation') and self.explanation is not None: + _dict['explanation'] = self.explanation + if hasattr(self, 'limit') and self.limit is not None: + _dict['limit'] = self.limit + if hasattr(self, 'model') and self.model is not None: + _dict['model'] = self.model + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CategoriesOptions object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CategoriesOptions') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CategoriesOptions') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CategoriesRelevantText: + """ + Relevant text that contributed to the categorization. + + :param str text: (optional) Text from the analyzed source that supports the + categorization. + """ + + def __init__( + self, + *, + text: Optional[str] = None, + ) -> None: + """ + Initialize a CategoriesRelevantText object. + + :param str text: (optional) Text from the analyzed source that supports the + categorization. + """ + self.text = text + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CategoriesRelevantText': + """Initialize a CategoriesRelevantText object from a json dictionary.""" + args = {} + if (text := _dict.get('text')) is not None: + args['text'] = text + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CategoriesRelevantText object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CategoriesRelevantText object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CategoriesRelevantText') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CategoriesRelevantText') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CategoriesResult: + """ + A categorization of the analyzed text. 
+ + :param str label: (optional) The path to the category through the multi-level + taxonomy hierarchy. For more information about the categories, see [Categories + hierarchy](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-categories#categories-hierarchy). + :param float score: (optional) Confidence score for the category classification. + Higher values indicate greater confidence. + :param CategoriesResultExplanation explanation: (optional) Information that + helps to explain what contributed to the categories result. + """ + + def __init__( + self, + *, + label: Optional[str] = None, + score: Optional[float] = None, + explanation: Optional['CategoriesResultExplanation'] = None, + ) -> None: + """ + Initialize a CategoriesResult object. + + :param str label: (optional) The path to the category through the + multi-level taxonomy hierarchy. For more information about the categories, + see [Categories + hierarchy](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-categories#categories-hierarchy). + :param float score: (optional) Confidence score for the category + classification. Higher values indicate greater confidence. + :param CategoriesResultExplanation explanation: (optional) Information that + helps to explain what contributed to the categories result. + """ + self.label = label + self.score = score + self.explanation = explanation + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CategoriesResult': + """Initialize a CategoriesResult object from a json dictionary.""" + args = {} + if (label := _dict.get('label')) is not None: + args['label'] = label + if (score := _dict.get('score')) is not None: + args['score'] = score + if (explanation := _dict.get('explanation')) is not None: + args['explanation'] = CategoriesResultExplanation.from_dict( + explanation) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a CategoriesResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language - if hasattr(self, 'analyzed_text') and self.analyzed_text is not None: - _dict['analyzed_text'] = self.analyzed_text - if hasattr(self, 'retrieved_url') and self.retrieved_url is not None: - _dict['retrieved_url'] = self.retrieved_url - if hasattr(self, 'usage') and self.usage is not None: - _dict['usage'] = self.usage._to_dict() - if hasattr(self, 'concepts') and self.concepts is not None: - _dict['concepts'] = [x._to_dict() for x in self.concepts] - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] - if hasattr(self, 'keywords') and self.keywords is not None: - _dict['keywords'] = [x._to_dict() for x in self.keywords] - if hasattr(self, 'categories') and self.categories is not None: - _dict['categories'] = [x._to_dict() for x in self.categories] - if hasattr(self, 'emotion') and self.emotion is not None: - _dict['emotion'] = self.emotion._to_dict() - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata._to_dict() - if hasattr(self, 'relations') and self.relations is not None: - _dict['relations'] = [x._to_dict() for x in self.relations] - if hasattr(self, 'semantic_roles') and self.semantic_roles is not None: - _dict['semantic_roles'] = [ - 
x._to_dict() for x in self.semantic_roles - ] - if hasattr(self, 'sentiment') and self.sentiment is not None: - _dict['sentiment'] = self.sentiment._to_dict() - if hasattr(self, 'syntax') and self.syntax is not None: - _dict['syntax'] = self.syntax._to_dict() + if hasattr(self, 'label') and self.label is not None: + _dict['label'] = self.label + if hasattr(self, 'score') and self.score is not None: + _dict['score'] = self.score + if hasattr(self, 'explanation') and self.explanation is not None: + if isinstance(self.explanation, dict): + _dict['explanation'] = self.explanation + else: + _dict['explanation'] = self.explanation.to_dict() return _dict - def __str__(self): - """Return a `str` version of this AnalysisResults object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CategoriesResult object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'CategoriesResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'CategoriesResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class AnalysisResultsMetadata(object): +class CategoriesResultExplanation: """ - Webpage metadata, such as the author and the title of the page. + Information that helps to explain what contributed to the categories result. - :attr list[Author] authors: (optional) The authors of the document. - :attr str publication_date: (optional) The publication date in the format ISO 8601. - :attr str title: (optional) The title of the document. - :attr str image: (optional) URL of a prominent image on the webpage. - :attr list[Feed] feeds: (optional) RSS/ATOM feeds found on the webpage. + :param List[CategoriesRelevantText] relevant_text: (optional) An array of + relevant text from the source that contributed to the categorization. The sorted + array begins with the phrase that contributed most significantly to the result, + followed by phrases that were less and less impactful. """ - def __init__(self, - authors=None, - publication_date=None, - title=None, - image=None, - feeds=None): + def __init__( + self, + *, + relevant_text: Optional[List['CategoriesRelevantText']] = None, + ) -> None: """ - Initialize a AnalysisResultsMetadata object. + Initialize a CategoriesResultExplanation object. - :param list[Author] authors: (optional) The authors of the document. - :param str publication_date: (optional) The publication date in the format ISO - 8601. - :param str title: (optional) The title of the document. - :param str image: (optional) URL of a prominent image on the webpage. - :param list[Feed] feeds: (optional) RSS/ATOM feeds found on the webpage. + :param List[CategoriesRelevantText] relevant_text: (optional) An array of + relevant text from the source that contributed to the categorization. The + sorted array begins with the phrase that contributed most significantly to + the result, followed by phrases that were less and less impactful. 
""" - self.authors = authors - self.publication_date = publication_date - self.title = title - self.image = image - self.feeds = feeds + self.relevant_text = relevant_text @classmethod - def _from_dict(cls, _dict): - """Initialize a AnalysisResultsMetadata object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'CategoriesResultExplanation': + """Initialize a CategoriesResultExplanation object from a json dictionary.""" args = {} - if 'authors' in _dict: - args['authors'] = [ - Author._from_dict(x) for x in (_dict.get('authors')) + if (relevant_text := _dict.get('relevant_text')) is not None: + args['relevant_text'] = [ + CategoriesRelevantText.from_dict(v) for v in relevant_text ] - if 'publication_date' in _dict: - args['publication_date'] = _dict.get('publication_date') - if 'title' in _dict: - args['title'] = _dict.get('title') - if 'image' in _dict: - args['image'] = _dict.get('image') - if 'feeds' in _dict: - args['feeds'] = [Feed._from_dict(x) for x in (_dict.get('feeds'))] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a CategoriesResultExplanation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'authors') and self.authors is not None: - _dict['authors'] = [x._to_dict() for x in self.authors] - if hasattr(self, - 'publication_date') and self.publication_date is not None: - _dict['publication_date'] = self.publication_date - if hasattr(self, 'title') and self.title is not None: - _dict['title'] = self.title - if hasattr(self, 'image') and self.image is not None: - _dict['image'] = self.image - if hasattr(self, 'feeds') and self.feeds is not None: - _dict['feeds'] = [x._to_dict() for x in self.feeds] + if hasattr(self, 'relevant_text') and self.relevant_text is not None: + relevant_text_list = [] + for v in self.relevant_text: + if isinstance(v, dict): + relevant_text_list.append(v) + else: + relevant_text_list.append(v.to_dict()) + _dict['relevant_text'] = relevant_text_list return _dict - def __str__(self): - """Return a `str` version of this AnalysisResultsMetadata object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this CategoriesResultExplanation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CategoriesResultExplanation') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'CategoriesResultExplanation') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class AnalysisResultsUsage(object): +class ClassificationsModel: """ - API usage information for the request. - - :attr int features: (optional) Number of features used in the API call. - :attr int text_characters: (optional) Number of text characters processed. - :attr int text_units: (optional) Number of 10,000-character units processed. + Classifications model. + + :param str name: (optional) An optional name for the model. + :param dict user_metadata: (optional) An optional map of metadata key-value + pairs to store with this model. 
+ :param str language: The 2-letter language code of this model. + :param str description: (optional) An optional description of the model. + :param str model_version: (optional) An optional version string. + :param str workspace_id: (optional) ID of the Watson Knowledge Studio workspace + that deployed this model to Natural Language Understanding. + :param str version_description: (optional) The description of the version. + :param List[str] features: (optional) The service features that are supported by + the custom model. + :param str status: When the status is `available`, the model is ready to use. + :param str model_id: Unique model ID. + :param datetime created: dateTime indicating when the model was created. + :param List[Notice] notices: (optional) + :param datetime last_trained: (optional) dateTime of last successful model + training. + :param datetime last_deployed: (optional) dateTime of last successful model + deployment. """ - def __init__(self, features=None, text_characters=None, text_units=None): + def __init__( + self, + language: str, + status: str, + model_id: str, + created: datetime, + *, + name: Optional[str] = None, + user_metadata: Optional[dict] = None, + description: Optional[str] = None, + model_version: Optional[str] = None, + workspace_id: Optional[str] = None, + version_description: Optional[str] = None, + features: Optional[List[str]] = None, + notices: Optional[List['Notice']] = None, + last_trained: Optional[datetime] = None, + last_deployed: Optional[datetime] = None, + ) -> None: """ - Initialize a AnalysisResultsUsage object. - - :param int features: (optional) Number of features used in the API call. - :param int text_characters: (optional) Number of text characters processed. - :param int text_units: (optional) Number of 10,000-character units processed. + Initialize a ClassificationsModel object. + + :param str language: The 2-letter language code of this model. + :param str status: When the status is `available`, the model is ready to + use. + :param str model_id: Unique model ID. + :param datetime created: dateTime indicating when the model was created. + :param str name: (optional) An optional name for the model. + :param dict user_metadata: (optional) An optional map of metadata key-value + pairs to store with this model. + :param str description: (optional) An optional description of the model. + :param str model_version: (optional) An optional version string. + :param str workspace_id: (optional) ID of the Watson Knowledge Studio + workspace that deployed this model to Natural Language Understanding. + :param str version_description: (optional) The description of the version. + :param List[str] features: (optional) The service features that are + supported by the custom model. + :param List[Notice] notices: (optional) + :param datetime last_trained: (optional) dateTime of last successful model + training. + :param datetime last_deployed: (optional) dateTime of last successful model + deployment. 
""" + self.name = name + self.user_metadata = user_metadata + self.language = language + self.description = description + self.model_version = model_version + self.workspace_id = workspace_id + self.version_description = version_description self.features = features - self.text_characters = text_characters - self.text_units = text_units + self.status = status + self.model_id = model_id + self.created = created + self.notices = notices + self.last_trained = last_trained + self.last_deployed = last_deployed @classmethod - def _from_dict(cls, _dict): - """Initialize a AnalysisResultsUsage object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'ClassificationsModel': + """Initialize a ClassificationsModel object from a json dictionary.""" args = {} - if 'features' in _dict: - args['features'] = _dict.get('features') - if 'text_characters' in _dict: - args['text_characters'] = _dict.get('text_characters') - if 'text_units' in _dict: - args['text_units'] = _dict.get('text_units') + if (name := _dict.get('name')) is not None: + args['name'] = name + if (user_metadata := _dict.get('user_metadata')) is not None: + args['user_metadata'] = user_metadata + if (language := _dict.get('language')) is not None: + args['language'] = language + else: + raise ValueError( + 'Required property \'language\' not present in ClassificationsModel JSON' + ) + if (description := _dict.get('description')) is not None: + args['description'] = description + if (model_version := _dict.get('model_version')) is not None: + args['model_version'] = model_version + if (workspace_id := _dict.get('workspace_id')) is not None: + args['workspace_id'] = workspace_id + if (version_description := + _dict.get('version_description')) is not None: + args['version_description'] = version_description + if (features := _dict.get('features')) is not None: + args['features'] = features + if (status := _dict.get('status')) is not None: + args['status'] = status + else: + raise ValueError( + 'Required property \'status\' not present in ClassificationsModel JSON' + ) + if (model_id := _dict.get('model_id')) is not None: + args['model_id'] = model_id + else: + raise ValueError( + 'Required property \'model_id\' not present in ClassificationsModel JSON' + ) + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) + else: + raise ValueError( + 'Required property \'created\' not present in ClassificationsModel JSON' + ) + if (notices := _dict.get('notices')) is not None: + args['notices'] = [Notice.from_dict(v) for v in notices] + if (last_trained := _dict.get('last_trained')) is not None: + args['last_trained'] = string_to_datetime(last_trained) + if (last_deployed := _dict.get('last_deployed')) is not None: + args['last_deployed'] = string_to_datetime(last_deployed) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a ClassificationsModel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'user_metadata') and self.user_metadata is not None: + _dict['user_metadata'] = self.user_metadata + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'model_version') and 
self.model_version is not None: + _dict['model_version'] = self.model_version + if hasattr(self, 'workspace_id') and self.workspace_id is not None: + _dict['workspace_id'] = self.workspace_id + if hasattr( + self, + 'version_description') and self.version_description is not None: + _dict['version_description'] = self.version_description if hasattr(self, 'features') and self.features is not None: _dict['features'] = self.features - if hasattr(self, - 'text_characters') and self.text_characters is not None: - _dict['text_characters'] = self.text_characters - if hasattr(self, 'text_units') and self.text_units is not None: - _dict['text_units'] = self.text_units + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'model_id') and self.model_id is not None: + _dict['model_id'] = self.model_id + if hasattr(self, 'created') and self.created is not None: + _dict['created'] = datetime_to_string(self.created) + if hasattr(self, 'notices') and self.notices is not None: + notices_list = [] + for v in self.notices: + if isinstance(v, dict): + notices_list.append(v) + else: + notices_list.append(v.to_dict()) + _dict['notices'] = notices_list + if hasattr(self, 'last_trained') and self.last_trained is not None: + _dict['last_trained'] = datetime_to_string(self.last_trained) + if hasattr(self, 'last_deployed') and self.last_deployed is not None: + _dict['last_deployed'] = datetime_to_string(self.last_deployed) return _dict - def __str__(self): - """Return a `str` version of this AnalysisResultsUsage object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this ClassificationsModel object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ClassificationsModel') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'ClassificationsModel') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class StatusEnum(str, Enum): + """ + When the status is `available`, the model is ready to use. + """ + + STARTING = 'starting' + TRAINING = 'training' + DEPLOYING = 'deploying' + AVAILABLE = 'available' + ERROR = 'error' + DELETED = 'deleted' -class Author(object): + +class ClassificationsModelList: """ - The author of the analyzed content. + List of classifications models. - :attr str name: (optional) Name of the author. + :param List[ClassificationsModel] models: (optional) The classifications models. """ - def __init__(self, name=None): + def __init__( + self, + *, + models: Optional[List['ClassificationsModel']] = None, + ) -> None: """ - Initialize a Author object. + Initialize a ClassificationsModelList object. - :param str name: (optional) Name of the author. + :param List[ClassificationsModel] models: (optional) The classifications + models. 
""" - self.name = name + self.models = models @classmethod - def _from_dict(cls, _dict): - """Initialize a Author object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'ClassificationsModelList': + """Initialize a ClassificationsModelList object from a json dictionary.""" args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') + if (models := _dict.get('models')) is not None: + args['models'] = [ClassificationsModel.from_dict(v) for v in models] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a ClassificationsModelList object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name + if hasattr(self, 'models') and self.models is not None: + models_list = [] + for v in self.models: + if isinstance(v, dict): + models_list.append(v) + else: + models_list.append(v.to_dict()) + _dict['models'] = models_list return _dict - def __str__(self): - """Return a `str` version of this Author object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ClassificationsModelList object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'ClassificationsModelList') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'ClassificationsModelList') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class CategoriesOptions(object): +class ClassificationsOptions: """ - Returns a five-level taxonomy of the content. The top three categories are returned. - Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, - Portuguese, Spanish. - - :attr int limit: (optional) Maximum number of categories to return. - :attr str model: (optional) Enter a [custom - model](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html) - ID to override the standard categories model. + Returns text classifications for the content. + + :param str model: (optional) Enter a [custom + model](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) + ID of the classifications model to be used. + You can analyze tone by using a language-specific model ID. See [Tone analytics + (Classifications)](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-tone_analytics) + for more information. """ - def __init__(self, limit=None, model=None): + def __init__( + self, + *, + model: Optional[str] = None, + ) -> None: """ - Initialize a CategoriesOptions object. + Initialize a ClassificationsOptions object. - :param int limit: (optional) Maximum number of categories to return. :param str model: (optional) Enter a [custom - model](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html) - ID to override the standard categories model. 
+ model](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) + ID of the classifications model to be used. + You can analyze tone by using a language-specific model ID. See [Tone + analytics + (Classifications)](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-tone_analytics) + for more information. """ - self.limit = limit self.model = model @classmethod - def _from_dict(cls, _dict): - """Initialize a CategoriesOptions object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'ClassificationsOptions': + """Initialize a ClassificationsOptions object from a json dictionary.""" args = {} - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - if 'model' in _dict: - args['model'] = _dict.get('model') + if (model := _dict.get('model')) is not None: + args['model'] = model return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a ClassificationsOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'limit') and self.limit is not None: - _dict['limit'] = self.limit if hasattr(self, 'model') and self.model is not None: _dict['model'] = self.model return _dict - def __str__(self): - """Return a `str` version of this CategoriesOptions object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this ClassificationsOptions object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ClassificationsOptions') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'ClassificationsOptions') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class CategoriesResult(object): +class ClassificationsResult: """ - A categorization of the analyzed text. + A classification of the analyzed text. - :attr str label: (optional) The path to the category through the 5-level taxonomy - hierarchy. For the complete list of categories, see the [Categories - hierarchy](https://cloud.ibm.com/docs/services/natural-language-understanding/categories.html#categories-hierarchy) - documentation. - :attr float score: (optional) Confidence score for the category classification. Higher - values indicate greater confidence. + :param str class_name: (optional) Classification assigned to the text. + :param float confidence: (optional) Confidence score for the classification. + Higher values indicate greater confidence. """ - def __init__(self, label=None, score=None): + def __init__( + self, + *, + class_name: Optional[str] = None, + confidence: Optional[float] = None, + ) -> None: """ - Initialize a CategoriesResult object. + Initialize a ClassificationsResult object. - :param str label: (optional) The path to the category through the 5-level taxonomy - hierarchy. For the complete list of categories, see the [Categories - hierarchy](https://cloud.ibm.com/docs/services/natural-language-understanding/categories.html#categories-hierarchy) - documentation. 
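# Sketch of the classifications (tone analytics) feature added above. The model
# ID shown is the English tone model ID from the service documentation and may
# change; treat it, and the Features container, as illustrative assumptions.
from ibm_watson.natural_language_understanding_v1 import Features, ClassificationsOptions

features = Features(
    classifications=ClassificationsOptions(model='tone-classifications-en-v1'),
)
# Pass `features` to NaturalLanguageUnderstandingV1.analyze(); each entry of
# response['classifications'] then carries the class_name/confidence pair
# modeled by ClassificationsResult below.
print(features.to_dict())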
- :param float score: (optional) Confidence score for the category classification. - Higher values indicate greater confidence. + :param str class_name: (optional) Classification assigned to the text. + :param float confidence: (optional) Confidence score for the + classification. Higher values indicate greater confidence. """ - self.label = label - self.score = score + self.class_name = class_name + self.confidence = confidence @classmethod - def _from_dict(cls, _dict): - """Initialize a CategoriesResult object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'ClassificationsResult': + """Initialize a ClassificationsResult object from a json dictionary.""" args = {} - if 'label' in _dict: - args['label'] = _dict.get('label') - if 'score' in _dict: - args['score'] = _dict.get('score') + if (class_name := _dict.get('class_name')) is not None: + args['class_name'] = class_name + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence return cls(**args) + @classmethod + def _from_dict(cls, _dict): + """Initialize a ClassificationsResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'class_name') and self.class_name is not None: + _dict['class_name'] = self.class_name + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + return _dict + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ClassificationsResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ClassificationsResult') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ClassificationsResult') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ClassificationsTrainingParameters: + """ + Optional classifications training parameters along with model train requests. + + :param str model_type: (optional) Model type selector to train either a + single_label or a multi_label classifier. + """ + + def __init__( + self, + *, + model_type: Optional[str] = None, + ) -> None: + """ + Initialize a ClassificationsTrainingParameters object. + + :param str model_type: (optional) Model type selector to train either a + single_label or a multi_label classifier. 
+ """ + self.model_type = model_type + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ClassificationsTrainingParameters': + """Initialize a ClassificationsTrainingParameters object from a json dictionary.""" + args = {} + if (model_type := _dict.get('model_type')) is not None: + args['model_type'] = model_type + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ClassificationsTrainingParameters object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'label') and self.label is not None: - _dict['label'] = self.label - if hasattr(self, 'score') and self.score is not None: - _dict['score'] = self.score + if hasattr(self, 'model_type') and self.model_type is not None: + _dict['model_type'] = self.model_type return _dict - def __str__(self): - """Return a `str` version of this CategoriesResult object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this ClassificationsTrainingParameters object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ClassificationsTrainingParameters') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'ClassificationsTrainingParameters') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class ModelTypeEnum(str, Enum): + """ + Model type selector to train either a single_label or a multi_label classifier. + """ + + SINGLE_LABEL = 'single_label' + MULTI_LABEL = 'multi_label' -class ConceptsOptions(object): + +class ConceptsOptions: """ Returns high-level concepts in the content. For example, a research paper about deep learning might return the concept, "Artificial Intelligence" although the term is not @@ -778,10 +2514,14 @@ class ConceptsOptions(object): Supported languages: English, French, German, Italian, Japanese, Korean, Portuguese, Spanish. - :attr int limit: (optional) Maximum number of concepts to return. + :param int limit: (optional) Maximum number of concepts to return. """ - def __init__(self, limit=None): + def __init__( + self, + *, + limit: Optional[int] = None, + ) -> None: """ Initialize a ConceptsOptions object. 
@@ -790,72 +2530,93 @@ def __init__(self, limit=None): self.limit = limit @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'ConceptsOptions': """Initialize a ConceptsOptions object from a json dictionary.""" args = {} - if 'limit' in _dict: - args['limit'] = _dict.get('limit') + if (limit := _dict.get('limit')) is not None: + args['limit'] = limit return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a ConceptsOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'limit') and self.limit is not None: _dict['limit'] = self.limit return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this ConceptsOptions object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'ConceptsOptions') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'ConceptsOptions') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class ConceptsResult(object): +class ConceptsResult: """ The general concepts referenced or alluded to in the analyzed text. - :attr str text: (optional) Name of the concept. - :attr float relevance: (optional) Relevance score between 0 and 1. Higher scores - indicate greater relevance. - :attr str dbpedia_resource: (optional) Link to the corresponding DBpedia resource. + :param str text: (optional) Name of the concept. + :param float relevance: (optional) Relevance score between 0 and 1. Higher + scores indicate greater relevance. + :param str dbpedia_resource: (optional) Link to the corresponding DBpedia + resource. """ - def __init__(self, text=None, relevance=None, dbpedia_resource=None): + def __init__( + self, + *, + text: Optional[str] = None, + relevance: Optional[float] = None, + dbpedia_resource: Optional[str] = None, + ) -> None: """ Initialize a ConceptsResult object. :param str text: (optional) Name of the concept. - :param float relevance: (optional) Relevance score between 0 and 1. Higher scores - indicate greater relevance. + :param float relevance: (optional) Relevance score between 0 and 1. Higher + scores indicate greater relevance. :param str dbpedia_resource: (optional) Link to the corresponding DBpedia - resource. + resource. 
""" self.text = text self.relevance = relevance self.dbpedia_resource = dbpedia_resource @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'ConceptsResult': """Initialize a ConceptsResult object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'relevance' in _dict: - args['relevance'] = _dict.get('relevance') - if 'dbpedia_resource' in _dict: - args['dbpedia_resource'] = _dict.get('dbpedia_resource') + if (text := _dict.get('text')) is not None: + args['text'] = text + if (relevance := _dict.get('relevance')) is not None: + args['relevance'] = relevance + if (dbpedia_resource := _dict.get('dbpedia_resource')) is not None: + args['dbpedia_resource'] = dbpedia_resource return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a ConceptsResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: @@ -867,29 +2628,37 @@ def _to_dict(self): _dict['dbpedia_resource'] = self.dbpedia_resource return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this ConceptsResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'ConceptsResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'ConceptsResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DeleteModelResults(object): +class DeleteModelResults: """ Delete model results. - :attr str deleted: (optional) model_id of the deleted model. + :param str deleted: (optional) model_id of the deleted model. """ - def __init__(self, deleted=None): + def __init__( + self, + *, + deleted: Optional[str] = None, + ) -> None: """ Initialize a DeleteModelResults object. 
@@ -898,70 +2667,91 @@ def __init__(self, deleted=None): self.deleted = deleted @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'DeleteModelResults': """Initialize a DeleteModelResults object from a json dictionary.""" args = {} - if 'deleted' in _dict: - args['deleted'] = _dict.get('deleted') + if (deleted := _dict.get('deleted')) is not None: + args['deleted'] = deleted return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DeleteModelResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'deleted') and self.deleted is not None: _dict['deleted'] = self.deleted return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this DeleteModelResults object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'DeleteModelResults') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'DeleteModelResults') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DisambiguationResult(object): +class DisambiguationResult: """ Disambiguation information for the entity. - :attr str name: (optional) Common entity name. - :attr str dbpedia_resource: (optional) Link to the corresponding DBpedia resource. - :attr list[str] subtype: (optional) Entity subtype information. + :param str name: (optional) Common entity name. + :param str dbpedia_resource: (optional) Link to the corresponding DBpedia + resource. + :param List[str] subtype: (optional) Entity subtype information. """ - def __init__(self, name=None, dbpedia_resource=None, subtype=None): + def __init__( + self, + *, + name: Optional[str] = None, + dbpedia_resource: Optional[str] = None, + subtype: Optional[List[str]] = None, + ) -> None: """ Initialize a DisambiguationResult object. :param str name: (optional) Common entity name. :param str dbpedia_resource: (optional) Link to the corresponding DBpedia - resource. - :param list[str] subtype: (optional) Entity subtype information. + resource. + :param List[str] subtype: (optional) Entity subtype information. 
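# DeleteModelResults echoes which custom model was removed. A hedged sketch of
# obtaining it through the service client's delete_model() call; the client
# setup, version date and all placeholder values are assumptions, not part of
# this hunk.
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import NaturalLanguageUnderstandingV1

nlu = NaturalLanguageUnderstandingV1(
    version='2022-04-07',
    authenticator=IAMAuthenticator('{apikey}'),
)
nlu.set_service_url('{url}')

result = nlu.delete_model(model_id='{custom_model_id}').get_result()
print(result.get('deleted'))  # model_id of the deleted model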
""" self.name = name self.dbpedia_resource = dbpedia_resource self.subtype = subtype @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'DisambiguationResult': """Initialize a DisambiguationResult object from a json dictionary.""" args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') - if 'dbpedia_resource' in _dict: - args['dbpedia_resource'] = _dict.get('dbpedia_resource') - if 'subtype' in _dict: - args['subtype'] = _dict.get('subtype') + if (name := _dict.get('name')) is not None: + args['name'] = name + if (dbpedia_resource := _dict.get('dbpedia_resource')) is not None: + args['dbpedia_resource'] = dbpedia_resource + if (subtype := _dict.get('subtype')) is not None: + args['subtype'] = subtype return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DisambiguationResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'name') and self.name is not None: @@ -973,98 +2763,131 @@ def _to_dict(self): _dict['subtype'] = self.subtype return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this DisambiguationResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'DisambiguationResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'DisambiguationResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DocumentEmotionResults(object): +class DocumentEmotionResults: """ Emotion results for the document as a whole. - :attr EmotionScores emotion: (optional) Emotion results for the document as a whole. + :param EmotionScores emotion: (optional) Emotion results for the document as a + whole. """ - def __init__(self, emotion=None): + def __init__( + self, + *, + emotion: Optional['EmotionScores'] = None, + ) -> None: """ Initialize a DocumentEmotionResults object. - :param EmotionScores emotion: (optional) Emotion results for the document as a - whole. + :param EmotionScores emotion: (optional) Emotion results for the document + as a whole. 
""" self.emotion = emotion @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'DocumentEmotionResults': """Initialize a DocumentEmotionResults object from a json dictionary.""" args = {} - if 'emotion' in _dict: - args['emotion'] = EmotionScores._from_dict(_dict.get('emotion')) + if (emotion := _dict.get('emotion')) is not None: + args['emotion'] = EmotionScores.from_dict(emotion) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DocumentEmotionResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'emotion') and self.emotion is not None: - _dict['emotion'] = self.emotion._to_dict() + if isinstance(self.emotion, dict): + _dict['emotion'] = self.emotion + else: + _dict['emotion'] = self.emotion.to_dict() return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this DocumentEmotionResults object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'DocumentEmotionResults') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'DocumentEmotionResults') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class DocumentSentimentResults(object): +class DocumentSentimentResults: """ DocumentSentimentResults. - :attr str label: (optional) Indicates whether the sentiment is positive, neutral, or - negative. - :attr float score: (optional) Sentiment score from -1 (negative) to 1 (positive). + :param str label: (optional) Indicates whether the sentiment is positive, + neutral, or negative. + :param float score: (optional) Sentiment score from -1 (negative) to 1 + (positive). """ - def __init__(self, label=None, score=None): + def __init__( + self, + *, + label: Optional[str] = None, + score: Optional[float] = None, + ) -> None: """ Initialize a DocumentSentimentResults object. - :param str label: (optional) Indicates whether the sentiment is positive, neutral, - or negative. - :param float score: (optional) Sentiment score from -1 (negative) to 1 (positive). + :param str label: (optional) Indicates whether the sentiment is positive, + neutral, or negative. + :param float score: (optional) Sentiment score from -1 (negative) to 1 + (positive). 
""" self.label = label self.score = score @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'DocumentSentimentResults': """Initialize a DocumentSentimentResults object from a json dictionary.""" args = {} - if 'label' in _dict: - args['label'] = _dict.get('label') - if 'score' in _dict: - args['score'] = _dict.get('score') + if (label := _dict.get('label')) is not None: + args['label'] = label + if (score := _dict.get('score')) is not None: + args['score'] = score return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a DocumentSentimentResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'label') and self.label is not None: @@ -1073,22 +2896,26 @@ def _to_dict(self): _dict['score'] = self.score return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this DocumentSentimentResults object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'DocumentSentimentResults') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'DocumentSentimentResults') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class EmotionOptions(object): +class EmotionOptions: """ Detects anger, disgust, fear, joy, or sadness that is conveyed in the content or by the context around target phrases specified in the targets parameter. You can analyze @@ -1096,35 +2923,45 @@ class EmotionOptions(object): `keywords.emotion`. Supported languages: English. - :attr bool document: (optional) Set this to `false` to hide document-level emotion - results. - :attr list[str] targets: (optional) Emotion results will be returned for each target - string that is found in the document. + :param bool document: (optional) Set this to `false` to hide document-level + emotion results. + :param List[str] targets: (optional) Emotion results will be returned for each + target string that is found in the document. """ - def __init__(self, document=None, targets=None): + def __init__( + self, + *, + document: Optional[bool] = None, + targets: Optional[List[str]] = None, + ) -> None: """ Initialize a EmotionOptions object. :param bool document: (optional) Set this to `false` to hide document-level - emotion results. - :param list[str] targets: (optional) Emotion results will be returned for each - target string that is found in the document. + emotion results. + :param List[str] targets: (optional) Emotion results will be returned for + each target string that is found in the document. 
""" self.document = document self.targets = targets @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'EmotionOptions': """Initialize a EmotionOptions object from a json dictionary.""" args = {} - if 'document' in _dict: - args['document'] = _dict.get('document') - if 'targets' in _dict: - args['targets'] = _dict.get('targets') + if (document := _dict.get('document')) is not None: + args['document'] = document + if (targets := _dict.get('targets')) is not None: + args['targets'] = targets return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a EmotionOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'document') and self.document is not None: @@ -1133,118 +2970,146 @@ def _to_dict(self): _dict['targets'] = self.targets return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this EmotionOptions object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'EmotionOptions') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'EmotionOptions') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class EmotionResult(object): +class EmotionResult: """ The detected anger, disgust, fear, joy, or sadness that is conveyed by the content. Emotion information can be returned for detected entities, keywords, or user-specified target phrases found in the text. - :attr DocumentEmotionResults document: (optional) Emotion results for the document as - a whole. - :attr list[TargetedEmotionResults] targets: (optional) Emotion results for specified - targets. + :param DocumentEmotionResults document: (optional) Emotion results for the + document as a whole. + :param List[TargetedEmotionResults] targets: (optional) Emotion results for + specified targets. """ - def __init__(self, document=None, targets=None): + def __init__( + self, + *, + document: Optional['DocumentEmotionResults'] = None, + targets: Optional[List['TargetedEmotionResults']] = None, + ) -> None: """ Initialize a EmotionResult object. :param DocumentEmotionResults document: (optional) Emotion results for the - document as a whole. - :param list[TargetedEmotionResults] targets: (optional) Emotion results for - specified targets. + document as a whole. + :param List[TargetedEmotionResults] targets: (optional) Emotion results for + specified targets. 
""" self.document = document self.targets = targets @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'EmotionResult': """Initialize a EmotionResult object from a json dictionary.""" args = {} - if 'document' in _dict: - args['document'] = DocumentEmotionResults._from_dict( - _dict.get('document')) - if 'targets' in _dict: + if (document := _dict.get('document')) is not None: + args['document'] = DocumentEmotionResults.from_dict(document) + if (targets := _dict.get('targets')) is not None: args['targets'] = [ - TargetedEmotionResults._from_dict(x) - for x in (_dict.get('targets')) + TargetedEmotionResults.from_dict(v) for v in targets ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a EmotionResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'document') and self.document is not None: - _dict['document'] = self.document._to_dict() + if isinstance(self.document, dict): + _dict['document'] = self.document + else: + _dict['document'] = self.document.to_dict() if hasattr(self, 'targets') and self.targets is not None: - _dict['targets'] = [x._to_dict() for x in self.targets] + targets_list = [] + for v in self.targets: + if isinstance(v, dict): + targets_list.append(v) + else: + targets_list.append(v.to_dict()) + _dict['targets'] = targets_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this EmotionResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'EmotionResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'EmotionResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class EmotionScores(object): +class EmotionScores: """ EmotionScores. - :attr float anger: (optional) Anger score from 0 to 1. A higher score means that the - text is more likely to convey anger. - :attr float disgust: (optional) Disgust score from 0 to 1. A higher score means that - the text is more likely to convey disgust. - :attr float fear: (optional) Fear score from 0 to 1. A higher score means that the - text is more likely to convey fear. - :attr float joy: (optional) Joy score from 0 to 1. A higher score means that the text - is more likely to convey joy. - :attr float sadness: (optional) Sadness score from 0 to 1. A higher score means that - the text is more likely to convey sadness. - """ - - def __init__(self, - anger=None, - disgust=None, - fear=None, - joy=None, - sadness=None): + :param float anger: (optional) Anger score from 0 to 1. A higher score means + that the text is more likely to convey anger. + :param float disgust: (optional) Disgust score from 0 to 1. A higher score means + that the text is more likely to convey disgust. + :param float fear: (optional) Fear score from 0 to 1. A higher score means that + the text is more likely to convey fear. + :param float joy: (optional) Joy score from 0 to 1. A higher score means that + the text is more likely to convey joy. 
+ :param float sadness: (optional) Sadness score from 0 to 1. A higher score means + that the text is more likely to convey sadness. + """ + + def __init__( + self, + *, + anger: Optional[float] = None, + disgust: Optional[float] = None, + fear: Optional[float] = None, + joy: Optional[float] = None, + sadness: Optional[float] = None, + ) -> None: """ Initialize a EmotionScores object. - :param float anger: (optional) Anger score from 0 to 1. A higher score means that - the text is more likely to convey anger. - :param float disgust: (optional) Disgust score from 0 to 1. A higher score means - that the text is more likely to convey disgust. - :param float fear: (optional) Fear score from 0 to 1. A higher score means that - the text is more likely to convey fear. - :param float joy: (optional) Joy score from 0 to 1. A higher score means that the - text is more likely to convey joy. - :param float sadness: (optional) Sadness score from 0 to 1. A higher score means - that the text is more likely to convey sadness. + :param float anger: (optional) Anger score from 0 to 1. A higher score + means that the text is more likely to convey anger. + :param float disgust: (optional) Disgust score from 0 to 1. A higher score + means that the text is more likely to convey disgust. + :param float fear: (optional) Fear score from 0 to 1. A higher score means + that the text is more likely to convey fear. + :param float joy: (optional) Joy score from 0 to 1. A higher score means + that the text is more likely to convey joy. + :param float sadness: (optional) Sadness score from 0 to 1. A higher score + means that the text is more likely to convey sadness. """ self.anger = anger self.disgust = disgust @@ -1253,22 +3118,27 @@ def __init__(self, self.sadness = sadness @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'EmotionScores': """Initialize a EmotionScores object from a json dictionary.""" args = {} - if 'anger' in _dict: - args['anger'] = _dict.get('anger') - if 'disgust' in _dict: - args['disgust'] = _dict.get('disgust') - if 'fear' in _dict: - args['fear'] = _dict.get('fear') - if 'joy' in _dict: - args['joy'] = _dict.get('joy') - if 'sadness' in _dict: - args['sadness'] = _dict.get('sadness') + if (anger := _dict.get('anger')) is not None: + args['anger'] = anger + if (disgust := _dict.get('disgust')) is not None: + args['disgust'] = disgust + if (fear := _dict.get('fear')) is not None: + args['fear'] = fear + if (joy := _dict.get('joy')) is not None: + args['joy'] = joy + if (sadness := _dict.get('sadness')) is not None: + args['sadness'] = sadness return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a EmotionScores object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'anger') and self.anger is not None: @@ -1283,61 +3153,68 @@ def _to_dict(self): _dict['sadness'] = self.sadness return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this EmotionScores object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'EmotionScores') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return 
self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'EmotionScores') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class EntitiesOptions(object): +class EntitiesOptions: """ - Identifies people, cities, organizations, and other entities in the content. See - [Entity types and - subtypes](https://cloud.ibm.com/docs/services/natural-language-understanding/entity-types.html). + Identifies people, cities, organizations, and other entities in the content. For more + information, see [Entity types and + subtypes](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-entity-type-systems). Supported languages: English, French, German, Italian, Japanese, Korean, Portuguese, Russian, Spanish, Swedish. Arabic, Chinese, and Dutch are supported only through custom models. - :attr int limit: (optional) Maximum number of entities to return. - :attr bool mentions: (optional) Set this to `true` to return locations of entity - mentions. - :attr str model: (optional) Enter a [custom - model](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html) - ID to override the standard entity detection model. - :attr bool sentiment: (optional) Set this to `true` to return sentiment information - for detected entities. - :attr bool emotion: (optional) Set this to `true` to analyze emotion for detected - keywords. - """ - - def __init__(self, - limit=None, - mentions=None, - model=None, - sentiment=None, - emotion=None): + :param int limit: (optional) Maximum number of entities to return. + :param bool mentions: (optional) Set this to `true` to return locations of + entity mentions. + :param str model: (optional) Enter a [custom + model](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) + ID to override the standard entity detection model. + :param bool sentiment: (optional) Set this to `true` to return sentiment + information for detected entities. + :param bool emotion: (optional) Set this to `true` to analyze emotion for + detected keywords. + """ + + def __init__( + self, + *, + limit: Optional[int] = None, + mentions: Optional[bool] = None, + model: Optional[str] = None, + sentiment: Optional[bool] = None, + emotion: Optional[bool] = None, + ) -> None: """ Initialize a EntitiesOptions object. :param int limit: (optional) Maximum number of entities to return. - :param bool mentions: (optional) Set this to `true` to return locations of entity - mentions. + :param bool mentions: (optional) Set this to `true` to return locations of + entity mentions. :param str model: (optional) Enter a [custom - model](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html) - ID to override the standard entity detection model. + model](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) + ID to override the standard entity detection model. :param bool sentiment: (optional) Set this to `true` to return sentiment - information for detected entities. - :param bool emotion: (optional) Set this to `true` to analyze emotion for detected - keywords. + information for detected entities. + :param bool emotion: (optional) Set this to `true` to analyze emotion for + detected keywords. 
""" self.limit = limit self.mentions = mentions @@ -1346,22 +3223,27 @@ def __init__(self, self.emotion = emotion @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'EntitiesOptions': """Initialize a EntitiesOptions object from a json dictionary.""" args = {} - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - if 'mentions' in _dict: - args['mentions'] = _dict.get('mentions') - if 'model' in _dict: - args['model'] = _dict.get('model') - if 'sentiment' in _dict: - args['sentiment'] = _dict.get('sentiment') - if 'emotion' in _dict: - args['emotion'] = _dict.get('emotion') + if (limit := _dict.get('limit')) is not None: + args['limit'] = limit + if (mentions := _dict.get('mentions')) is not None: + args['mentions'] = mentions + if (model := _dict.get('model')) is not None: + args['model'] = model + if (sentiment := _dict.get('sentiment')) is not None: + args['sentiment'] = sentiment + if (emotion := _dict.get('emotion')) is not None: + args['emotion'] = emotion return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a EntitiesOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'limit') and self.limit is not None: @@ -1376,68 +3258,88 @@ def _to_dict(self): _dict['emotion'] = self.emotion return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this EntitiesOptions object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'EntitiesOptions') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'EntitiesOptions') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class EntitiesResult(object): +class EntitiesResult: """ The important people, places, geopolitical entities and other types of entities in your content. - :attr str type: (optional) Entity type. - :attr str text: (optional) The name of the entity. - :attr float relevance: (optional) Relevance score from 0 to 1. Higher values indicate - greater relevance. - :attr list[EntityMention] mentions: (optional) Entity mentions and locations. - :attr int count: (optional) How many times the entity was mentioned in the text. - :attr EmotionScores emotion: (optional) Emotion analysis results for the entity, - enabled with the `emotion` option. - :attr FeatureSentimentResults sentiment: (optional) Sentiment analysis results for the - entity, enabled with the `sentiment` option. - :attr DisambiguationResult disambiguation: (optional) Disambiguation information for - the entity. - """ - - def __init__(self, - type=None, - text=None, - relevance=None, - mentions=None, - count=None, - emotion=None, - sentiment=None, - disambiguation=None): + :param str type: (optional) Entity type. + :param str text: (optional) The name of the entity. + :param float relevance: (optional) Relevance score from 0 to 1. Higher values + indicate greater relevance. + :param float confidence: (optional) Confidence in the entity identification from + 0 to 1. Higher values indicate higher confidence. 
In standard entities requests, + confidence is returned only for English text. All entities requests that use + custom models return the confidence score. + :param List[EntityMention] mentions: (optional) Entity mentions and locations. + :param int count: (optional) How many times the entity was mentioned in the + text. + :param EmotionScores emotion: (optional) Emotion analysis results for the + entity, enabled with the `emotion` option. + :param FeatureSentimentResults sentiment: (optional) Sentiment analysis results + for the entity, enabled with the `sentiment` option. + :param DisambiguationResult disambiguation: (optional) Disambiguation + information for the entity. + """ + + def __init__( + self, + *, + type: Optional[str] = None, + text: Optional[str] = None, + relevance: Optional[float] = None, + confidence: Optional[float] = None, + mentions: Optional[List['EntityMention']] = None, + count: Optional[int] = None, + emotion: Optional['EmotionScores'] = None, + sentiment: Optional['FeatureSentimentResults'] = None, + disambiguation: Optional['DisambiguationResult'] = None, + ) -> None: """ Initialize a EntitiesResult object. :param str type: (optional) Entity type. :param str text: (optional) The name of the entity. - :param float relevance: (optional) Relevance score from 0 to 1. Higher values - indicate greater relevance. - :param list[EntityMention] mentions: (optional) Entity mentions and locations. - :param int count: (optional) How many times the entity was mentioned in the text. - :param EmotionScores emotion: (optional) Emotion analysis results for the entity, - enabled with the `emotion` option. - :param FeatureSentimentResults sentiment: (optional) Sentiment analysis results - for the entity, enabled with the `sentiment` option. - :param DisambiguationResult disambiguation: (optional) Disambiguation information - for the entity. + :param float relevance: (optional) Relevance score from 0 to 1. Higher + values indicate greater relevance. + :param float confidence: (optional) Confidence in the entity identification + from 0 to 1. Higher values indicate higher confidence. In standard entities + requests, confidence is returned only for English text. All entities + requests that use custom models return the confidence score. + :param List[EntityMention] mentions: (optional) Entity mentions and + locations. + :param int count: (optional) How many times the entity was mentioned in the + text. + :param EmotionScores emotion: (optional) Emotion analysis results for the + entity, enabled with the `emotion` option. + :param FeatureSentimentResults sentiment: (optional) Sentiment analysis + results for the entity, enabled with the `sentiment` option. + :param DisambiguationResult disambiguation: (optional) Disambiguation + information for the entity. 
""" self.type = type self.text = text self.relevance = relevance + self.confidence = confidence self.mentions = mentions self.count = count self.emotion = emotion @@ -1445,32 +3347,36 @@ def __init__(self, self.disambiguation = disambiguation @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'EntitiesResult': """Initialize a EntitiesResult object from a json dictionary.""" args = {} - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'relevance' in _dict: - args['relevance'] = _dict.get('relevance') - if 'mentions' in _dict: - args['mentions'] = [ - EntityMention._from_dict(x) for x in (_dict.get('mentions')) - ] - if 'count' in _dict: - args['count'] = _dict.get('count') - if 'emotion' in _dict: - args['emotion'] = EmotionScores._from_dict(_dict.get('emotion')) - if 'sentiment' in _dict: - args['sentiment'] = FeatureSentimentResults._from_dict( - _dict.get('sentiment')) - if 'disambiguation' in _dict: - args['disambiguation'] = DisambiguationResult._from_dict( - _dict.get('disambiguation')) + if (type := _dict.get('type')) is not None: + args['type'] = type + if (text := _dict.get('text')) is not None: + args['text'] = text + if (relevance := _dict.get('relevance')) is not None: + args['relevance'] = relevance + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + if (mentions := _dict.get('mentions')) is not None: + args['mentions'] = [EntityMention.from_dict(v) for v in mentions] + if (count := _dict.get('count')) is not None: + args['count'] = count + if (emotion := _dict.get('emotion')) is not None: + args['emotion'] = EmotionScores.from_dict(emotion) + if (sentiment := _dict.get('sentiment')) is not None: + args['sentiment'] = FeatureSentimentResults.from_dict(sentiment) + if (disambiguation := _dict.get('disambiguation')) is not None: + args['disambiguation'] = DisambiguationResult.from_dict( + disambiguation) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a EntitiesResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'type') and self.type is not None: @@ -1479,240 +3385,321 @@ def _to_dict(self): _dict['text'] = self.text if hasattr(self, 'relevance') and self.relevance is not None: _dict['relevance'] = self.relevance + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence if hasattr(self, 'mentions') and self.mentions is not None: - _dict['mentions'] = [x._to_dict() for x in self.mentions] + mentions_list = [] + for v in self.mentions: + if isinstance(v, dict): + mentions_list.append(v) + else: + mentions_list.append(v.to_dict()) + _dict['mentions'] = mentions_list if hasattr(self, 'count') and self.count is not None: _dict['count'] = self.count if hasattr(self, 'emotion') and self.emotion is not None: - _dict['emotion'] = self.emotion._to_dict() + if isinstance(self.emotion, dict): + _dict['emotion'] = self.emotion + else: + _dict['emotion'] = self.emotion.to_dict() if hasattr(self, 'sentiment') and self.sentiment is not None: - _dict['sentiment'] = self.sentiment._to_dict() + if isinstance(self.sentiment, dict): + _dict['sentiment'] = self.sentiment + else: + _dict['sentiment'] = self.sentiment.to_dict() if hasattr(self, 'disambiguation') and self.disambiguation is not None: - _dict['disambiguation'] = 
self.disambiguation._to_dict() + if isinstance(self.disambiguation, dict): + _dict['disambiguation'] = self.disambiguation + else: + _dict['disambiguation'] = self.disambiguation.to_dict() return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this EntitiesResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'EntitiesResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'EntitiesResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class EntityMention(object): +class EntityMention: """ EntityMention. - :attr str text: (optional) Entity mention text. - :attr list[int] location: (optional) Character offsets indicating the beginning and - end of the mention in the analyzed text. + :param str text: (optional) Entity mention text. + :param List[int] location: (optional) Character offsets indicating the beginning + and end of the mention in the analyzed text. + :param float confidence: (optional) Confidence in the entity identification from + 0 to 1. Higher values indicate higher confidence. In standard entities requests, + confidence is returned only for English text. All entities requests that use + custom models return the confidence score. """ - def __init__(self, text=None, location=None): + def __init__( + self, + *, + text: Optional[str] = None, + location: Optional[List[int]] = None, + confidence: Optional[float] = None, + ) -> None: """ Initialize a EntityMention object. :param str text: (optional) Entity mention text. - :param list[int] location: (optional) Character offsets indicating the beginning - and end of the mention in the analyzed text. + :param List[int] location: (optional) Character offsets indicating the + beginning and end of the mention in the analyzed text. + :param float confidence: (optional) Confidence in the entity identification + from 0 to 1. Higher values indicate higher confidence. In standard entities + requests, confidence is returned only for English text. All entities + requests that use custom models return the confidence score. 
""" self.text = text self.location = location + self.confidence = confidence @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'EntityMention': """Initialize a EntityMention object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'location' in _dict: - args['location'] = _dict.get('location') + if (text := _dict.get('text')) is not None: + args['text'] = text + if (location := _dict.get('location')) is not None: + args['location'] = location + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a EntityMention object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'location') and self.location is not None: _dict['location'] = self.location + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this EntityMention object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'EntityMention') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'EntityMention') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class FeatureSentimentResults(object): +class FeatureSentimentResults: """ FeatureSentimentResults. - :attr float score: (optional) Sentiment score from -1 (negative) to 1 (positive). + :param float score: (optional) Sentiment score from -1 (negative) to 1 + (positive). """ - def __init__(self, score=None): + def __init__( + self, + *, + score: Optional[float] = None, + ) -> None: """ Initialize a FeatureSentimentResults object. - :param float score: (optional) Sentiment score from -1 (negative) to 1 (positive). + :param float score: (optional) Sentiment score from -1 (negative) to 1 + (positive). 
""" self.score = score @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'FeatureSentimentResults': """Initialize a FeatureSentimentResults object from a json dictionary.""" args = {} - if 'score' in _dict: - args['score'] = _dict.get('score') + if (score := _dict.get('score')) is not None: + args['score'] = score return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a FeatureSentimentResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'score') and self.score is not None: _dict['score'] = self.score return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this FeatureSentimentResults object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'FeatureSentimentResults') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'FeatureSentimentResults') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Features(object): +class Features: """ Analysis features and options. - :attr ConceptsOptions concepts: (optional) Returns high-level concepts in the content. - For example, a research paper about deep learning might return the concept, - "Artificial Intelligence" although the term is not mentioned. - Supported languages: English, French, German, Italian, Japanese, Korean, Portuguese, - Spanish. - :attr EmotionOptions emotion: (optional) Detects anger, disgust, fear, joy, or sadness - that is conveyed in the content or by the context around target phrases specified in - the targets parameter. You can analyze emotion for detected entities with - `entities.emotion` and for keywords with `keywords.emotion`. - Supported languages: English. - :attr EntitiesOptions entities: (optional) Identifies people, cities, organizations, - and other entities in the content. See [Entity types and - subtypes](https://cloud.ibm.com/docs/services/natural-language-understanding/entity-types.html). - Supported languages: English, French, German, Italian, Japanese, Korean, Portuguese, - Russian, Spanish, Swedish. Arabic, Chinese, and Dutch are supported only through - custom models. - :attr KeywordsOptions keywords: (optional) Returns important keywords in the content. - Supported languages: English, French, German, Italian, Japanese, Korean, Portuguese, - Russian, Spanish, Swedish. - :attr MetadataOptions metadata: (optional) Returns information from the document, - including author name, title, RSS/ATOM feeds, prominent page image, and publication - date. Supports URL and HTML input types only. - :attr RelationsOptions relations: (optional) Recognizes when two entities are related - and identifies the type of relation. For example, an `awardedTo` relation might - connect the entities "Nobel Prize" and "Albert Einstein". See [Relation - types](https://cloud.ibm.com/docs/services/natural-language-understanding/relations.html). - Supported languages: Arabic, English, German, Japanese, Korean, Spanish. 
Chinese, - Dutch, French, Italian, and Portuguese custom models are also supported. - :attr SemanticRolesOptions semantic_roles: (optional) Parses sentences into subject, - action, and object form. - Supported languages: English, German, Japanese, Korean, Spanish. - :attr SentimentOptions sentiment: (optional) Analyzes the general sentiment of your - content or the sentiment toward specific target phrases. You can analyze sentiment for - detected entities with `entities.sentiment` and for keywords with - `keywords.sentiment`. - Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, - Portuguese, Russian, Spanish. - :attr CategoriesOptions categories: (optional) Returns a five-level taxonomy of the - content. The top three categories are returned. - Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, - Portuguese, Spanish. - :attr SyntaxOptions syntax: (optional) Returns tokens and sentences from the input - text. - """ - - def __init__(self, - concepts=None, - emotion=None, - entities=None, - keywords=None, - metadata=None, - relations=None, - semantic_roles=None, - sentiment=None, - categories=None, - syntax=None): + :param ClassificationsOptions classifications: (optional) Returns text + classifications for the content. + :param ConceptsOptions concepts: (optional) Returns high-level concepts in the + content. For example, a research paper about deep learning might return the + concept, "Artificial Intelligence" although the term is not mentioned. + Supported languages: English, French, German, Italian, Japanese, Korean, + Portuguese, Spanish. + :param EmotionOptions emotion: (optional) Detects anger, disgust, fear, joy, or + sadness that is conveyed in the content or by the context around target phrases + specified in the targets parameter. You can analyze emotion for detected + entities with `entities.emotion` and for keywords with `keywords.emotion`. + Supported languages: English. + :param EntitiesOptions entities: (optional) Identifies people, cities, + organizations, and other entities in the content. For more information, see + [Entity types and + subtypes](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-entity-type-systems). + Supported languages: English, French, German, Italian, Japanese, Korean, + Portuguese, Russian, Spanish, Swedish. Arabic, Chinese, and Dutch are supported + only through custom models. + :param KeywordsOptions keywords: (optional) Returns important keywords in the + content. + Supported languages: English, French, German, Italian, Japanese, Korean, + Portuguese, Russian, Spanish, Swedish. + :param dict metadata: (optional) Returns information from the document, + including author name, title, RSS/ATOM feeds, prominent page image, and + publication date. Supports URL and HTML input types only. + :param RelationsOptions relations: (optional) Recognizes when two entities are + related and identifies the type of relation. For example, an `awardedTo` + relation might connect the entities "Nobel Prize" and "Albert Einstein". For + more information, see [Relation + types](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-relations). + Supported languages: Arabic, English, German, Japanese, Korean, Spanish. + Chinese, Dutch, French, Italian, and Portuguese custom models are also + supported. + :param SemanticRolesOptions semantic_roles: (optional) Parses sentences into + subject, action, and object form. 
+ Supported languages: English, German, Japanese, Korean, Spanish. + :param SentimentOptions sentiment: (optional) Analyzes the general sentiment of + your content or the sentiment toward specific target phrases. You can analyze + sentiment for detected entities with `entities.sentiment` and for keywords with + `keywords.sentiment`. + Supported languages: Arabic, English, French, German, Italian, Japanese, + Korean, Portuguese, Russian, Spanish. + :param CategoriesOptions categories: (optional) Returns a hierarchical taxonomy + of the content. The top three categories are returned by default. + Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, + Portuguese, Spanish. + :param SyntaxOptions syntax: (optional) Returns tokens and sentences from the + input text. + """ + + def __init__( + self, + *, + classifications: Optional['ClassificationsOptions'] = None, + concepts: Optional['ConceptsOptions'] = None, + emotion: Optional['EmotionOptions'] = None, + entities: Optional['EntitiesOptions'] = None, + keywords: Optional['KeywordsOptions'] = None, + metadata: Optional[dict] = None, + relations: Optional['RelationsOptions'] = None, + semantic_roles: Optional['SemanticRolesOptions'] = None, + sentiment: Optional['SentimentOptions'] = None, + categories: Optional['CategoriesOptions'] = None, + syntax: Optional['SyntaxOptions'] = None, + ) -> None: """ Initialize a Features object. - :param ConceptsOptions concepts: (optional) Returns high-level concepts in the - content. For example, a research paper about deep learning might return the - concept, "Artificial Intelligence" although the term is not mentioned. - Supported languages: English, French, German, Italian, Japanese, Korean, - Portuguese, Spanish. - :param EmotionOptions emotion: (optional) Detects anger, disgust, fear, joy, or - sadness that is conveyed in the content or by the context around target phrases - specified in the targets parameter. You can analyze emotion for detected entities - with `entities.emotion` and for keywords with `keywords.emotion`. - Supported languages: English. + :param ClassificationsOptions classifications: (optional) Returns text + classifications for the content. + :param ConceptsOptions concepts: (optional) Returns high-level concepts in + the content. For example, a research paper about deep learning might return + the concept, "Artificial Intelligence" although the term is not mentioned. + Supported languages: English, French, German, Italian, Japanese, Korean, + Portuguese, Spanish. + :param EmotionOptions emotion: (optional) Detects anger, disgust, fear, + joy, or sadness that is conveyed in the content or by the context around + target phrases specified in the targets parameter. You can analyze emotion + for detected entities with `entities.emotion` and for keywords with + `keywords.emotion`. + Supported languages: English. :param EntitiesOptions entities: (optional) Identifies people, cities, - organizations, and other entities in the content. See [Entity types and - subtypes](https://cloud.ibm.com/docs/services/natural-language-understanding/entity-types.html). - Supported languages: English, French, German, Italian, Japanese, Korean, - Portuguese, Russian, Spanish, Swedish. Arabic, Chinese, and Dutch are supported - only through custom models. - :param KeywordsOptions keywords: (optional) Returns important keywords in the - content. - Supported languages: English, French, German, Italian, Japanese, Korean, - Portuguese, Russian, Spanish, Swedish. 
- :param MetadataOptions metadata: (optional) Returns information from the document, - including author name, title, RSS/ATOM feeds, prominent page image, and - publication date. Supports URL and HTML input types only. - :param RelationsOptions relations: (optional) Recognizes when two entities are - related and identifies the type of relation. For example, an `awardedTo` relation - might connect the entities "Nobel Prize" and "Albert Einstein". See [Relation - types](https://cloud.ibm.com/docs/services/natural-language-understanding/relations.html). - Supported languages: Arabic, English, German, Japanese, Korean, Spanish. Chinese, - Dutch, French, Italian, and Portuguese custom models are also supported. - :param SemanticRolesOptions semantic_roles: (optional) Parses sentences into - subject, action, and object form. - Supported languages: English, German, Japanese, Korean, Spanish. - :param SentimentOptions sentiment: (optional) Analyzes the general sentiment of - your content or the sentiment toward specific target phrases. You can analyze - sentiment for detected entities with `entities.sentiment` and for keywords with - `keywords.sentiment`. - Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, - Portuguese, Russian, Spanish. - :param CategoriesOptions categories: (optional) Returns a five-level taxonomy of - the content. The top three categories are returned. - Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, - Portuguese, Spanish. - :param SyntaxOptions syntax: (optional) Returns tokens and sentences from the - input text. + organizations, and other entities in the content. For more information, see + [Entity types and + subtypes](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-entity-type-systems). + Supported languages: English, French, German, Italian, Japanese, Korean, + Portuguese, Russian, Spanish, Swedish. Arabic, Chinese, and Dutch are + supported only through custom models. + :param KeywordsOptions keywords: (optional) Returns important keywords in + the content. + Supported languages: English, French, German, Italian, Japanese, Korean, + Portuguese, Russian, Spanish, Swedish. + :param dict metadata: (optional) Returns information from the document, + including author name, title, RSS/ATOM feeds, prominent page image, and + publication date. Supports URL and HTML input types only. + :param RelationsOptions relations: (optional) Recognizes when two entities + are related and identifies the type of relation. For example, an + `awardedTo` relation might connect the entities "Nobel Prize" and "Albert + Einstein". For more information, see [Relation + types](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-relations). + Supported languages: Arabic, English, German, Japanese, Korean, Spanish. + Chinese, Dutch, French, Italian, and Portuguese custom models are also + supported. + :param SemanticRolesOptions semantic_roles: (optional) Parses sentences + into subject, action, and object form. + Supported languages: English, German, Japanese, Korean, Spanish. + :param SentimentOptions sentiment: (optional) Analyzes the general + sentiment of your content or the sentiment toward specific target phrases. + You can analyze sentiment for detected entities with `entities.sentiment` + and for keywords with `keywords.sentiment`. 
+ Supported languages: Arabic, English, French, German, Italian, Japanese, + Korean, Portuguese, Russian, Spanish. + :param CategoriesOptions categories: (optional) Returns a hierarchical + taxonomy of the content. The top three categories are returned by default. + Supported languages: Arabic, English, French, German, Italian, Japanese, + Korean, Portuguese, Spanish. + :param SyntaxOptions syntax: (optional) Returns tokens and sentences from + the input text. """ + self.classifications = classifications self.concepts = concepts self.emotion = emotion self.entities = entities @@ -1725,83 +3712,234 @@ def __init__(self, self.syntax = syntax @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'Features': """Initialize a Features object from a json dictionary.""" args = {} - if 'concepts' in _dict: - args['concepts'] = ConceptsOptions._from_dict(_dict.get('concepts')) - if 'emotion' in _dict: - args['emotion'] = EmotionOptions._from_dict(_dict.get('emotion')) - if 'entities' in _dict: - args['entities'] = EntitiesOptions._from_dict(_dict.get('entities')) - if 'keywords' in _dict: - args['keywords'] = KeywordsOptions._from_dict(_dict.get('keywords')) - if 'metadata' in _dict: - args['metadata'] = MetadataOptions._from_dict(_dict.get('metadata')) - if 'relations' in _dict: - args['relations'] = RelationsOptions._from_dict( - _dict.get('relations')) - if 'semantic_roles' in _dict: - args['semantic_roles'] = SemanticRolesOptions._from_dict( - _dict.get('semantic_roles')) - if 'sentiment' in _dict: - args['sentiment'] = SentimentOptions._from_dict( - _dict.get('sentiment')) - if 'categories' in _dict: - args['categories'] = CategoriesOptions._from_dict( - _dict.get('categories')) - if 'syntax' in _dict: - args['syntax'] = SyntaxOptions._from_dict(_dict.get('syntax')) + if (classifications := _dict.get('classifications')) is not None: + args['classifications'] = ClassificationsOptions.from_dict( + classifications) + if (concepts := _dict.get('concepts')) is not None: + args['concepts'] = ConceptsOptions.from_dict(concepts) + if (emotion := _dict.get('emotion')) is not None: + args['emotion'] = EmotionOptions.from_dict(emotion) + if (entities := _dict.get('entities')) is not None: + args['entities'] = EntitiesOptions.from_dict(entities) + if (keywords := _dict.get('keywords')) is not None: + args['keywords'] = KeywordsOptions.from_dict(keywords) + if (metadata := _dict.get('metadata')) is not None: + args['metadata'] = metadata + if (relations := _dict.get('relations')) is not None: + args['relations'] = RelationsOptions.from_dict(relations) + if (semantic_roles := _dict.get('semantic_roles')) is not None: + args['semantic_roles'] = SemanticRolesOptions.from_dict( + semantic_roles) + if (sentiment := _dict.get('sentiment')) is not None: + args['sentiment'] = SentimentOptions.from_dict(sentiment) + if (categories := _dict.get('categories')) is not None: + args['categories'] = CategoriesOptions.from_dict(categories) + if (syntax := _dict.get('syntax')) is not None: + args['syntax'] = SyntaxOptions.from_dict(syntax) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Features object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} + if hasattr(self, + 'classifications') and self.classifications is not None: + if isinstance(self.classifications, dict): + _dict['classifications'] = self.classifications + else: + 
_dict['classifications'] = self.classifications.to_dict() if hasattr(self, 'concepts') and self.concepts is not None: - _dict['concepts'] = self.concepts._to_dict() + if isinstance(self.concepts, dict): + _dict['concepts'] = self.concepts + else: + _dict['concepts'] = self.concepts.to_dict() if hasattr(self, 'emotion') and self.emotion is not None: - _dict['emotion'] = self.emotion._to_dict() + if isinstance(self.emotion, dict): + _dict['emotion'] = self.emotion + else: + _dict['emotion'] = self.emotion.to_dict() if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = self.entities._to_dict() + if isinstance(self.entities, dict): + _dict['entities'] = self.entities + else: + _dict['entities'] = self.entities.to_dict() if hasattr(self, 'keywords') and self.keywords is not None: - _dict['keywords'] = self.keywords._to_dict() + if isinstance(self.keywords, dict): + _dict['keywords'] = self.keywords + else: + _dict['keywords'] = self.keywords.to_dict() if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata._to_dict() + _dict['metadata'] = self.metadata if hasattr(self, 'relations') and self.relations is not None: - _dict['relations'] = self.relations._to_dict() + if isinstance(self.relations, dict): + _dict['relations'] = self.relations + else: + _dict['relations'] = self.relations.to_dict() if hasattr(self, 'semantic_roles') and self.semantic_roles is not None: - _dict['semantic_roles'] = self.semantic_roles._to_dict() + if isinstance(self.semantic_roles, dict): + _dict['semantic_roles'] = self.semantic_roles + else: + _dict['semantic_roles'] = self.semantic_roles.to_dict() if hasattr(self, 'sentiment') and self.sentiment is not None: - _dict['sentiment'] = self.sentiment._to_dict() + if isinstance(self.sentiment, dict): + _dict['sentiment'] = self.sentiment + else: + _dict['sentiment'] = self.sentiment.to_dict() if hasattr(self, 'categories') and self.categories is not None: - _dict['categories'] = self.categories._to_dict() + if isinstance(self.categories, dict): + _dict['categories'] = self.categories + else: + _dict['categories'] = self.categories.to_dict() if hasattr(self, 'syntax') and self.syntax is not None: - _dict['syntax'] = self.syntax._to_dict() + if isinstance(self.syntax, dict): + _dict['syntax'] = self.syntax + else: + _dict['syntax'] = self.syntax.to_dict() return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this Features object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Features') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Features') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class FeaturesResultsMetadata: + """ + Webpage metadata, such as the author and the title of the page. + + :param List[Author] authors: (optional) The authors of the document. + :param str publication_date: (optional) The publication date in the format ISO + 8601. + :param str title: (optional) The title of the document. + :param str image: (optional) URL of a prominent image on the webpage. + :param List[Feed] feeds: (optional) RSS/ATOM feeds found on the webpage. 
+ """ - def __eq__(self, other): + def __init__( + self, + *, + authors: Optional[List['Author']] = None, + publication_date: Optional[str] = None, + title: Optional[str] = None, + image: Optional[str] = None, + feeds: Optional[List['Feed']] = None, + ) -> None: + """ + Initialize a FeaturesResultsMetadata object. + + :param List[Author] authors: (optional) The authors of the document. + :param str publication_date: (optional) The publication date in the format + ISO 8601. + :param str title: (optional) The title of the document. + :param str image: (optional) URL of a prominent image on the webpage. + :param List[Feed] feeds: (optional) RSS/ATOM feeds found on the webpage. + """ + self.authors = authors + self.publication_date = publication_date + self.title = title + self.image = image + self.feeds = feeds + + @classmethod + def from_dict(cls, _dict: Dict) -> 'FeaturesResultsMetadata': + """Initialize a FeaturesResultsMetadata object from a json dictionary.""" + args = {} + if (authors := _dict.get('authors')) is not None: + args['authors'] = [Author.from_dict(v) for v in authors] + if (publication_date := _dict.get('publication_date')) is not None: + args['publication_date'] = publication_date + if (title := _dict.get('title')) is not None: + args['title'] = title + if (image := _dict.get('image')) is not None: + args['image'] = image + if (feeds := _dict.get('feeds')) is not None: + args['feeds'] = [Feed.from_dict(v) for v in feeds] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a FeaturesResultsMetadata object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'authors') and self.authors is not None: + authors_list = [] + for v in self.authors: + if isinstance(v, dict): + authors_list.append(v) + else: + authors_list.append(v.to_dict()) + _dict['authors'] = authors_list + if hasattr(self, + 'publication_date') and self.publication_date is not None: + _dict['publication_date'] = self.publication_date + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + if hasattr(self, 'image') and self.image is not None: + _dict['image'] = self.image + if hasattr(self, 'feeds') and self.feeds is not None: + feeds_list = [] + for v in self.feeds: + if isinstance(v, dict): + feeds_list.append(v) + else: + feeds_list.append(v.to_dict()) + _dict['feeds'] = feeds_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this FeaturesResultsMetadata object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'FeaturesResultsMetadata') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'FeaturesResultsMetadata') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Feed(object): +class Feed: """ RSS or ATOM feed found on the webpage. - :attr str link: (optional) URL of the RSS or ATOM feed. + :param str link: (optional) URL of the RSS or ATOM feed. """ - def __init__(self, link=None): + def __init__( + self, + *, + link: Optional[str] = None, + ) -> None: """ Initialize a Feed object. 
@@ -1810,75 +3948,95 @@ def __init__(self, link=None): self.link = link @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'Feed': """Initialize a Feed object from a json dictionary.""" args = {} - if 'link' in _dict: - args['link'] = _dict.get('link') + if (link := _dict.get('link')) is not None: + args['link'] = link return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Feed object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'link') and self.link is not None: _dict['link'] = self.link return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this Feed object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Feed') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Feed') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class KeywordsOptions(object): +class KeywordsOptions: """ Returns important keywords in the content. Supported languages: English, French, German, Italian, Japanese, Korean, Portuguese, Russian, Spanish, Swedish. - :attr int limit: (optional) Maximum number of keywords to return. - :attr bool sentiment: (optional) Set this to `true` to return sentiment information - for detected keywords. - :attr bool emotion: (optional) Set this to `true` to analyze emotion for detected - keywords. + :param int limit: (optional) Maximum number of keywords to return. + :param bool sentiment: (optional) Set this to `true` to return sentiment + information for detected keywords. + :param bool emotion: (optional) Set this to `true` to analyze emotion for + detected keywords. """ - def __init__(self, limit=None, sentiment=None, emotion=None): + def __init__( + self, + *, + limit: Optional[int] = None, + sentiment: Optional[bool] = None, + emotion: Optional[bool] = None, + ) -> None: """ Initialize a KeywordsOptions object. :param int limit: (optional) Maximum number of keywords to return. :param bool sentiment: (optional) Set this to `true` to return sentiment - information for detected keywords. - :param bool emotion: (optional) Set this to `true` to analyze emotion for detected - keywords. + information for detected keywords. + :param bool emotion: (optional) Set this to `true` to analyze emotion for + detected keywords. 
""" self.limit = limit self.sentiment = sentiment self.emotion = emotion @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'KeywordsOptions': """Initialize a KeywordsOptions object from a json dictionary.""" args = {} - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - if 'sentiment' in _dict: - args['sentiment'] = _dict.get('sentiment') - if 'emotion' in _dict: - args['emotion'] = _dict.get('emotion') + if (limit := _dict.get('limit')) is not None: + args['limit'] = limit + if (sentiment := _dict.get('sentiment')) is not None: + args['sentiment'] = sentiment + if (emotion := _dict.get('emotion')) is not None: + args['emotion'] = emotion return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a KeywordsOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'limit') and self.limit is not None: @@ -1889,53 +4047,61 @@ def _to_dict(self): _dict['emotion'] = self.emotion return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this KeywordsOptions object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'KeywordsOptions') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'KeywordsOptions') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class KeywordsResult(object): +class KeywordsResult: """ The important keywords in the content, organized by relevance. - :attr int count: (optional) Number of times the keyword appears in the analyzed text. - :attr float relevance: (optional) Relevance score from 0 to 1. Higher values indicate - greater relevance. - :attr str text: (optional) The keyword text. - :attr EmotionScores emotion: (optional) Emotion analysis results for the keyword, - enabled with the `emotion` option. - :attr FeatureSentimentResults sentiment: (optional) Sentiment analysis results for the - keyword, enabled with the `sentiment` option. + :param int count: (optional) Number of times the keyword appears in the analyzed + text. + :param float relevance: (optional) Relevance score from 0 to 1. Higher values + indicate greater relevance. + :param str text: (optional) The keyword text. + :param EmotionScores emotion: (optional) Emotion analysis results for the + keyword, enabled with the `emotion` option. + :param FeatureSentimentResults sentiment: (optional) Sentiment analysis results + for the keyword, enabled with the `sentiment` option. """ - def __init__(self, - count=None, - relevance=None, - text=None, - emotion=None, - sentiment=None): + def __init__( + self, + *, + count: Optional[int] = None, + relevance: Optional[float] = None, + text: Optional[str] = None, + emotion: Optional['EmotionScores'] = None, + sentiment: Optional['FeatureSentimentResults'] = None, + ) -> None: """ Initialize a KeywordsResult object. - :param int count: (optional) Number of times the keyword appears in the analyzed - text. - :param float relevance: (optional) Relevance score from 0 to 1. 
Higher values - indicate greater relevance. + :param int count: (optional) Number of times the keyword appears in the + analyzed text. + :param float relevance: (optional) Relevance score from 0 to 1. Higher + values indicate greater relevance. :param str text: (optional) The keyword text. - :param EmotionScores emotion: (optional) Emotion analysis results for the keyword, - enabled with the `emotion` option. - :param FeatureSentimentResults sentiment: (optional) Sentiment analysis results - for the keyword, enabled with the `sentiment` option. + :param EmotionScores emotion: (optional) Emotion analysis results for the + keyword, enabled with the `emotion` option. + :param FeatureSentimentResults sentiment: (optional) Sentiment analysis + results for the keyword, enabled with the `sentiment` option. """ self.count = count self.relevance = relevance @@ -1944,23 +4110,27 @@ def __init__(self, self.sentiment = sentiment @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'KeywordsResult': """Initialize a KeywordsResult object from a json dictionary.""" args = {} - if 'count' in _dict: - args['count'] = _dict.get('count') - if 'relevance' in _dict: - args['relevance'] = _dict.get('relevance') - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'emotion' in _dict: - args['emotion'] = EmotionScores._from_dict(_dict.get('emotion')) - if 'sentiment' in _dict: - args['sentiment'] = FeatureSentimentResults._from_dict( - _dict.get('sentiment')) + if (count := _dict.get('count')) is not None: + args['count'] = count + if (relevance := _dict.get('relevance')) is not None: + args['relevance'] = relevance + if (text := _dict.get('text')) is not None: + args['text'] = text + if (emotion := _dict.get('emotion')) is not None: + args['emotion'] = EmotionScores.from_dict(emotion) + if (sentiment := _dict.get('sentiment')) is not None: + args['sentiment'] = FeatureSentimentResults.from_dict(sentiment) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a KeywordsResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'count') and self.count is not None: @@ -1970,243 +4140,332 @@ def _to_dict(self): if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'emotion') and self.emotion is not None: - _dict['emotion'] = self.emotion._to_dict() + if isinstance(self.emotion, dict): + _dict['emotion'] = self.emotion + else: + _dict['emotion'] = self.emotion.to_dict() if hasattr(self, 'sentiment') and self.sentiment is not None: - _dict['sentiment'] = self.sentiment._to_dict() + if isinstance(self.sentiment, dict): + _dict['sentiment'] = self.sentiment + else: + _dict['sentiment'] = self.sentiment.to_dict() return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this KeywordsResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'KeywordsResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'KeywordsResult') -> bool: """Return `true` when self and 
other are not equal, false otherwise.""" return not self == other -class ListModelsResults(object): +class ListModelsResults: """ Custom models that are available for entities and relations. - :attr list[Model] models: (optional) An array of available models. + :param List[Model] models: (optional) An array of available models. """ - def __init__(self, models=None): + def __init__( + self, + *, + models: Optional[List['Model']] = None, + ) -> None: """ Initialize a ListModelsResults object. - :param list[Model] models: (optional) An array of available models. + :param List[Model] models: (optional) An array of available models. """ self.models = models @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'ListModelsResults': """Initialize a ListModelsResults object from a json dictionary.""" args = {} - if 'models' in _dict: - args['models'] = [ - Model._from_dict(x) for x in (_dict.get('models')) - ] + if (models := _dict.get('models')) is not None: + args['models'] = [Model.from_dict(v) for v in models] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a ListModelsResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'models') and self.models is not None: - _dict['models'] = [x._to_dict() for x in self.models] + models_list = [] + for v in self.models: + if isinstance(v, dict): + models_list.append(v) + else: + models_list.append(v.to_dict()) + _dict['models'] = models_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this ListModelsResults object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'ListModelsResults') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'ListModelsResults') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class MetadataOptions(object): +class Model: """ - Returns information from the document, including author name, title, RSS/ATOM feeds, - prominent page image, and publication date. Supports URL and HTML input types only. + Model. + :param str status: (optional) When the status is `available`, the model is ready + to use. + :param str model_id: (optional) Unique model ID. + :param str language: (optional) ISO 639-1 code that indicates the language of + the model. + :param str description: (optional) Model description. + :param str workspace_id: (optional) ID of the Watson Knowledge Studio workspace + that deployed this model to Natural Language Understanding. + :param str model_version: (optional) The model version, if it was manually + provided in Watson Knowledge Studio. + :param str version: (optional) Deprecated. Use `model_version` instead. + :param str version_description: (optional) The description of the version, if it + was manually provided in Watson Knowledge Studio. + :param datetime created: (optional) A dateTime indicating when the model was + created.
""" - def __init__(self): + def __init__( + self, + *, + status: Optional[str] = None, + model_id: Optional[str] = None, + language: Optional[str] = None, + description: Optional[str] = None, + workspace_id: Optional[str] = None, + model_version: Optional[str] = None, + version: Optional[str] = None, + version_description: Optional[str] = None, + created: Optional[datetime] = None, + ) -> None: """ - Initialize a MetadataOptions object. + Initialize a Model object. + :param str status: (optional) When the status is `available`, the model is + ready to use. + :param str model_id: (optional) Unique model ID. + :param str language: (optional) ISO 639-1 code that indicates the language + of the model. + :param str description: (optional) Model description. + :param str workspace_id: (optional) ID of the Watson Knowledge Studio + workspace that deployed this model to Natural Language Understanding. + :param str model_version: (optional) The model version, if it was manually + provided in Watson Knowledge Studio. + :param str version: (optional) Deprecated. Use + `model_version` instead. + :param str version_description: (optional) The description of the version, + if it was manually provided in Watson Knowledge Studio. + :param datetime created: (optional) A dateTime indicating when the model + was created. """ + self.status = status + self.model_id = model_id + self.language = language + self.description = description + self.workspace_id = workspace_id + self.model_version = model_version + self.version = version + self.version_description = version_description + self.created = created @classmethod - def _from_dict(cls, _dict): - """Initialize a MetadataOptions object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'Model': + """Initialize a Model object from a json dictionary.""" args = {} + if (status := _dict.get('status')) is not None: + args['status'] = status + if (model_id := _dict.get('model_id')) is not None: + args['model_id'] = model_id + if (language := _dict.get('language')) is not None: + args['language'] = language + if (description := _dict.get('description')) is not None: + args['description'] = description + if (workspace_id := _dict.get('workspace_id')) is not None: + args['workspace_id'] = workspace_id + if (model_version := _dict.get('model_version')) is not None: + args['model_version'] = model_version + if (version := _dict.get('version')) is not None: + args['version'] = version + if (version_description := + _dict.get('version_description')) is not None: + args['version_description'] = version_description + if (created := _dict.get('created')) is not None: + args['created'] = string_to_datetime(created) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Model object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'model_id') and self.model_id is not None: + _dict['model_id'] = self.model_id + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'workspace_id') and self.workspace_id is not None: + _dict['workspace_id'] = self.workspace_id + if hasattr(self, 'model_version') and self.model_version is not None: +
_dict['model_version'] = self.model_version + if hasattr(self, 'version') and self.version is not None: + _dict['version'] = self.version + if hasattr( + self, + 'version_description') and self.version_description is not None: + _dict['version_description'] = self.version_description + if hasattr(self, 'created') and self.created is not None: + _dict['created'] = datetime_to_string(self.created) return _dict - def __str__(self): - """Return a `str` version of this MetadataOptions object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Model object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Model') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Model') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class StatusEnum(str, Enum): + """ + When the status is `available`, the model is ready to use. + """ + + STARTING = 'starting' + TRAINING = 'training' + DEPLOYING = 'deploying' + AVAILABLE = 'available' + ERROR = 'error' + DELETED = 'deleted' -class Model(object): + +class Notice: """ - Model. + A list of messages describing model training issues when model status is `error`. - :attr str status: (optional) When the status is `available`, the model is ready to - use. - :attr str model_id: (optional) Unique model ID. - :attr str language: (optional) ISO 639-1 code indicating the language of the model. - :attr str description: (optional) Model description. - :attr str workspace_id: (optional) ID of the Watson Knowledge Studio workspace that - deployed this model to Natural Language Understanding. - :attr str version: (optional) The model version, if it was manually provided in Watson - Knowledge Studio. - :attr str version_description: (optional) The description of the version, if it was - manually provided in Watson Knowledge Studio. - :attr datetime created: (optional) A dateTime indicating when the model was created. - """ - - def __init__(self, - status=None, - model_id=None, - language=None, - description=None, - workspace_id=None, - version=None, - version_description=None, - created=None): + :param str message: (optional) Describes deficiencies or inconsistencies in + training data. + """ + + def __init__( + self, + *, + message: Optional[str] = None, + ) -> None: """ - Initialize a Model object. + Initialize a Notice object. - :param str status: (optional) When the status is `available`, the model is ready - to use. - :param str model_id: (optional) Unique model ID. - :param str language: (optional) ISO 639-1 code indicating the language of the - model. - :param str description: (optional) Model description. - :param str workspace_id: (optional) ID of the Watson Knowledge Studio workspace - that deployed this model to Natural Language Understanding. - :param str version: (optional) The model version, if it was manually provided in - Watson Knowledge Studio. - :param str version_description: (optional) The description of the version, if it - was manually provided in Watson Knowledge Studio. - :param datetime created: (optional) A dateTime indicating when the model was - created. 
""" - self.status = status - self.model_id = model_id - self.language = language - self.description = description - self.workspace_id = workspace_id - self.version = version - self.version_description = version_description - self.created = created + self.message = message @classmethod - def _from_dict(cls, _dict): - """Initialize a Model object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'Notice': + """Initialize a Notice object from a json dictionary.""" args = {} - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'model_id' in _dict: - args['model_id'] = _dict.get('model_id') - if 'language' in _dict: - args['language'] = _dict.get('language') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'workspace_id' in _dict: - args['workspace_id'] = _dict.get('workspace_id') - if 'version' in _dict: - args['version'] = _dict.get('version') - if 'version_description' in _dict: - args['version_description'] = _dict.get('version_description') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) + if (message := _dict.get('message')) is not None: + args['message'] = message return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Notice object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'model_id') and self.model_id is not None: - _dict['model_id'] = self.model_id - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'workspace_id') and self.workspace_id is not None: - _dict['workspace_id'] = self.workspace_id - if hasattr(self, 'version') and self.version is not None: - _dict['version'] = self.version - if hasattr( - self, - 'version_description') and self.version_description is not None: - _dict['version_description'] = self.version_description - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) + if hasattr(self, 'message') and getattr(self, 'message') is not None: + _dict['message'] = getattr(self, 'message') return _dict - def __str__(self): - """Return a `str` version of this Model object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Notice object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Notice') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Notice') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class RelationArgument(object): +class RelationArgument: """ RelationArgument. - :attr list[RelationEntity] entities: (optional) An array of extracted entities. - :attr list[int] location: (optional) Character offsets indicating the beginning and - end of the mention in the analyzed text. 
- :attr str text: (optional) Text that corresponds to the argument. + :param List[RelationEntity] entities: (optional) An array of extracted entities. + :param List[int] location: (optional) Character offsets indicating the beginning + and end of the mention in the analyzed text. + :param str text: (optional) Text that corresponds to the argument. """ - def __init__(self, entities=None, location=None, text=None): + def __init__( + self, + *, + entities: Optional[List['RelationEntity']] = None, + location: Optional[List[int]] = None, + text: Optional[str] = None, + ) -> None: """ Initialize a RelationArgument object. - :param list[RelationEntity] entities: (optional) An array of extracted entities. - :param list[int] location: (optional) Character offsets indicating the beginning - and end of the mention in the analyzed text. + :param List[RelationEntity] entities: (optional) An array of extracted + entities. + :param List[int] location: (optional) Character offsets indicating the + beginning and end of the mention in the analyzed text. :param str text: (optional) Text that corresponds to the argument. """ self.entities = entities @@ -2214,54 +4473,72 @@ def __init__(self, entities=None, location=None, text=None): self.text = text @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'RelationArgument': """Initialize a RelationArgument object from a json dictionary.""" args = {} - if 'entities' in _dict: - args['entities'] = [ - RelationEntity._from_dict(x) for x in (_dict.get('entities')) - ] - if 'location' in _dict: - args['location'] = _dict.get('location') - if 'text' in _dict: - args['text'] = _dict.get('text') + if (entities := _dict.get('entities')) is not None: + args['entities'] = [RelationEntity.from_dict(v) for v in entities] + if (location := _dict.get('location')) is not None: + args['location'] = location + if (text := _dict.get('text')) is not None: + args['text'] = text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RelationArgument object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list if hasattr(self, 'location') and self.location is not None: _dict['location'] = self.location if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this RelationArgument object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'RelationArgument') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'RelationArgument') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class RelationEntity(object): +class RelationEntity: """ An entity that corresponds with an argument in a 
relation. - :attr str text: (optional) Text that corresponds to the entity. - :attr str type: (optional) Entity type. + :param str text: (optional) Text that corresponds to the entity. + :param str type: (optional) Entity type. """ - def __init__(self, text=None, type=None): + def __init__( + self, + *, + text: Optional[str] = None, + type: Optional[str] = None, + ) -> None: """ Initialize a RelationEntity object. @@ -2272,16 +4549,21 @@ def __init__(self, text=None, type=None): self.type = type @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'RelationEntity': """Initialize a RelationEntity object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'type' in _dict: - args['type'] = _dict.get('type') + if (text := _dict.get('text')) is not None: + args['text'] = text + if (type := _dict.get('type')) is not None: + args['type'] = type return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RelationEntity object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: @@ -2290,97 +4572,121 @@ def _to_dict(self): _dict['type'] = self.type return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this RelationEntity object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'RelationEntity') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'RelationEntity') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class RelationsOptions(object): +class RelationsOptions: """ Recognizes when two entities are related and identifies the type of relation. For example, an `awardedTo` relation might connect the entities "Nobel Prize" and "Albert - Einstein". See [Relation - types](https://cloud.ibm.com/docs/services/natural-language-understanding/relations.html). + Einstein". For more information, see [Relation + types](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-relations). Supported languages: Arabic, English, German, Japanese, Korean, Spanish. Chinese, Dutch, French, Italian, and Portuguese custom models are also supported. - :attr str model: (optional) Enter a [custom - model](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html) - ID to override the default model. + :param str model: (optional) Enter a [custom + model](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) + ID to override the default model. """ - def __init__(self, model=None): + def __init__( + self, + *, + model: Optional[str] = None, + ) -> None: """ Initialize a RelationsOptions object. :param str model: (optional) Enter a [custom - model](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html) - ID to override the default model. 
+ model](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) + ID to override the default model. """ self.model = model @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'RelationsOptions': """Initialize a RelationsOptions object from a json dictionary.""" args = {} - if 'model' in _dict: - args['model'] = _dict.get('model') + if (model := _dict.get('model')) is not None: + args['model'] = model return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RelationsOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'model') and self.model is not None: _dict['model'] = self.model return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this RelationsOptions object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'RelationsOptions') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'RelationsOptions') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class RelationsResult(object): +class RelationsResult: """ The relations between entities found in the content. - :attr float score: (optional) Confidence score for the relation. Higher values - indicate greater confidence. - :attr str sentence: (optional) The sentence that contains the relation. - :attr str type: (optional) The type of the relation. - :attr list[RelationArgument] arguments: (optional) Entity mentions that are involved - in the relation. + :param float score: (optional) Confidence score for the relation. Higher values + indicate greater confidence. + :param str sentence: (optional) The sentence that contains the relation. + :param str type: (optional) The type of the relation. + :param List[RelationArgument] arguments: (optional) Entity mentions that are + involved in the relation. """ - def __init__(self, score=None, sentence=None, type=None, arguments=None): + def __init__( + self, + *, + score: Optional[float] = None, + sentence: Optional[str] = None, + type: Optional[str] = None, + arguments: Optional[List['RelationArgument']] = None, + ) -> None: """ Initialize a RelationsResult object. - :param float score: (optional) Confidence score for the relation. Higher values - indicate greater confidence. + :param float score: (optional) Confidence score for the relation. Higher + values indicate greater confidence. :param str sentence: (optional) The sentence that contains the relation. :param str type: (optional) The type of the relation. - :param list[RelationArgument] arguments: (optional) Entity mentions that are - involved in the relation. + :param List[RelationArgument] arguments: (optional) Entity mentions that + are involved in the relation. 
""" self.score = score self.sentence = sentence @@ -2388,22 +4694,27 @@ def __init__(self, score=None, sentence=None, type=None, arguments=None): self.arguments = arguments @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'RelationsResult': """Initialize a RelationsResult object from a json dictionary.""" args = {} - if 'score' in _dict: - args['score'] = _dict.get('score') - if 'sentence' in _dict: - args['sentence'] = _dict.get('sentence') - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'arguments' in _dict: + if (score := _dict.get('score')) is not None: + args['score'] = score + if (sentence := _dict.get('sentence')) is not None: + args['sentence'] = sentence + if (type := _dict.get('type')) is not None: + args['type'] = type + if (arguments := _dict.get('arguments')) is not None: args['arguments'] = [ - RelationArgument._from_dict(x) for x in (_dict.get('arguments')) + RelationArgument.from_dict(v) for v in arguments ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RelationsResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'score') and self.score is not None: @@ -2413,33 +4724,48 @@ def _to_dict(self): if hasattr(self, 'type') and self.type is not None: _dict['type'] = self.type if hasattr(self, 'arguments') and self.arguments is not None: - _dict['arguments'] = [x._to_dict() for x in self.arguments] + arguments_list = [] + for v in self.arguments: + if isinstance(v, dict): + arguments_list.append(v) + else: + arguments_list.append(v.to_dict()) + _dict['arguments'] = arguments_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this RelationsResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'RelationsResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'RelationsResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SemanticRolesEntity(object): +class SemanticRolesEntity: """ SemanticRolesEntity. - :attr str type: (optional) Entity type. - :attr str text: (optional) The entity text. + :param str type: (optional) Entity type. + :param str text: (optional) The entity text. """ - def __init__(self, type=None, text=None): + def __init__( + self, + *, + type: Optional[str] = None, + text: Optional[str] = None, + ) -> None: """ Initialize a SemanticRolesEntity object. 
@@ -2450,16 +4776,21 @@ def __init__(self, type=None, text=None): self.text = text @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SemanticRolesEntity': """Initialize a SemanticRolesEntity object from a json dictionary.""" args = {} - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'text' in _dict: - args['text'] = _dict.get('text') + if (type := _dict.get('type')) is not None: + args['type'] = type + if (text := _dict.get('text')) is not None: + args['text'] = text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SemanticRolesEntity object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'type') and self.type is not None: @@ -2468,29 +4799,37 @@ def _to_dict(self): _dict['text'] = self.text return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SemanticRolesEntity object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SemanticRolesEntity') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SemanticRolesEntity') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SemanticRolesKeyword(object): +class SemanticRolesKeyword: """ SemanticRolesKeyword. - :attr str text: (optional) The keyword text. + :param str text: (optional) The keyword text. """ - def __init__(self, text=None): + def __init__( + self, + *, + text: Optional[str] = None, + ) -> None: """ Initialize a SemanticRolesKeyword object. 
@@ -2499,74 +4838,95 @@ def __init__(self, text=None): self.text = text @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SemanticRolesKeyword': """Initialize a SemanticRolesKeyword object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') + if (text := _dict.get('text')) is not None: + args['text'] = text return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SemanticRolesKeyword object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SemanticRolesKeyword object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SemanticRolesKeyword') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SemanticRolesKeyword') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SemanticRolesOptions(object): +class SemanticRolesOptions: """ Parses sentences into subject, action, and object form. Supported languages: English, German, Japanese, Korean, Spanish. - :attr int limit: (optional) Maximum number of semantic_roles results to return. - :attr bool keywords: (optional) Set this to `true` to return keyword information for - subjects and objects. - :attr bool entities: (optional) Set this to `true` to return entity information for - subjects and objects. + :param int limit: (optional) Maximum number of semantic_roles results to return. + :param bool keywords: (optional) Set this to `true` to return keyword + information for subjects and objects. + :param bool entities: (optional) Set this to `true` to return entity information + for subjects and objects. """ - def __init__(self, limit=None, keywords=None, entities=None): + def __init__( + self, + *, + limit: Optional[int] = None, + keywords: Optional[bool] = None, + entities: Optional[bool] = None, + ) -> None: """ Initialize a SemanticRolesOptions object. - :param int limit: (optional) Maximum number of semantic_roles results to return. - :param bool keywords: (optional) Set this to `true` to return keyword information - for subjects and objects. - :param bool entities: (optional) Set this to `true` to return entity information - for subjects and objects. + :param int limit: (optional) Maximum number of semantic_roles results to + return. + :param bool keywords: (optional) Set this to `true` to return keyword + information for subjects and objects. + :param bool entities: (optional) Set this to `true` to return entity + information for subjects and objects. 
""" self.limit = limit self.keywords = keywords self.entities = entities @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SemanticRolesOptions': """Initialize a SemanticRolesOptions object from a json dictionary.""" args = {} - if 'limit' in _dict: - args['limit'] = _dict.get('limit') - if 'keywords' in _dict: - args['keywords'] = _dict.get('keywords') - if 'entities' in _dict: - args['entities'] = _dict.get('entities') + if (limit := _dict.get('limit')) is not None: + args['limit'] = limit + if (keywords := _dict.get('keywords')) is not None: + args['keywords'] = keywords + if (entities := _dict.get('entities')) is not None: + args['entities'] = entities return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SemanticRolesOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'limit') and self.limit is not None: @@ -2577,47 +4937,58 @@ def _to_dict(self): _dict['entities'] = self.entities return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SemanticRolesOptions object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SemanticRolesOptions') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SemanticRolesOptions') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SemanticRolesResult(object): +class SemanticRolesResult: """ The object containing the actions and the objects the actions act upon. - :attr str sentence: (optional) Sentence from the source that contains the subject, - action, and object. - :attr SemanticRolesResultSubject subject: (optional) The extracted subject from the - sentence. - :attr SemanticRolesResultAction action: (optional) The extracted action from the - sentence. - :attr SemanticRolesResultObject object: (optional) The extracted object from the - sentence. + :param str sentence: (optional) Sentence from the source that contains the + subject, action, and object. + :param SemanticRolesResultSubject subject: (optional) The extracted subject from + the sentence. + :param SemanticRolesResultAction action: (optional) The extracted action from + the sentence. + :param SemanticRolesResultObject object: (optional) The extracted object from + the sentence. """ - def __init__(self, sentence=None, subject=None, action=None, object=None): + def __init__( + self, + *, + sentence: Optional[str] = None, + subject: Optional['SemanticRolesResultSubject'] = None, + action: Optional['SemanticRolesResultAction'] = None, + object: Optional['SemanticRolesResultObject'] = None, + ) -> None: """ Initialize a SemanticRolesResult object. :param str sentence: (optional) Sentence from the source that contains the - subject, action, and object. - :param SemanticRolesResultSubject subject: (optional) The extracted subject from - the sentence. - :param SemanticRolesResultAction action: (optional) The extracted action from the - sentence. 
- :param SemanticRolesResultObject object: (optional) The extracted object from the - sentence. + subject, action, and object. + :param SemanticRolesResultSubject subject: (optional) The extracted subject + from the sentence. + :param SemanticRolesResultAction action: (optional) The extracted action + from the sentence. + :param SemanticRolesResultObject object: (optional) The extracted object + from the sentence. """ self.sentence = sentence self.subject = subject @@ -2625,60 +4996,81 @@ def __init__(self, sentence=None, subject=None, action=None, object=None): self.object = object @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SemanticRolesResult': """Initialize a SemanticRolesResult object from a json dictionary.""" args = {} - if 'sentence' in _dict: - args['sentence'] = _dict.get('sentence') - if 'subject' in _dict: - args['subject'] = SemanticRolesResultSubject._from_dict( - _dict.get('subject')) - if 'action' in _dict: - args['action'] = SemanticRolesResultAction._from_dict( - _dict.get('action')) - if 'object' in _dict: - args['object'] = SemanticRolesResultObject._from_dict( - _dict.get('object')) + if (sentence := _dict.get('sentence')) is not None: + args['sentence'] = sentence + if (subject := _dict.get('subject')) is not None: + args['subject'] = SemanticRolesResultSubject.from_dict(subject) + if (action := _dict.get('action')) is not None: + args['action'] = SemanticRolesResultAction.from_dict(action) + if (object := _dict.get('object')) is not None: + args['object'] = SemanticRolesResultObject.from_dict(object) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SemanticRolesResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'sentence') and self.sentence is not None: _dict['sentence'] = self.sentence if hasattr(self, 'subject') and self.subject is not None: - _dict['subject'] = self.subject._to_dict() + if isinstance(self.subject, dict): + _dict['subject'] = self.subject + else: + _dict['subject'] = self.subject.to_dict() if hasattr(self, 'action') and self.action is not None: - _dict['action'] = self.action._to_dict() + if isinstance(self.action, dict): + _dict['action'] = self.action + else: + _dict['action'] = self.action.to_dict() if hasattr(self, 'object') and self.object is not None: - _dict['object'] = self.object._to_dict() + if isinstance(self.object, dict): + _dict['object'] = self.object + else: + _dict['object'] = self.object.to_dict() return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SemanticRolesResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SemanticRolesResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SemanticRolesResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SemanticRolesResultAction(object): +class SemanticRolesResultAction: """ The extracted action from the sentence. 
- :attr str text: (optional) Analyzed text that corresponds to the action. - :attr str normalized: (optional) normalized version of the action. - :attr SemanticRolesVerb verb: (optional) + :param str text: (optional) Analyzed text that corresponds to the action. + :param str normalized: (optional) normalized version of the action. + :param SemanticRolesVerb verb: (optional) """ - def __init__(self, text=None, normalized=None, verb=None): + def __init__( + self, + *, + text: Optional[str] = None, + normalized: Optional[str] = None, + verb: Optional['SemanticRolesVerb'] = None, + ) -> None: """ Initialize a SemanticRolesResultAction object. @@ -2691,18 +5083,23 @@ def __init__(self, text=None, normalized=None, verb=None): self.verb = verb @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SemanticRolesResultAction': """Initialize a SemanticRolesResultAction object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'normalized' in _dict: - args['normalized'] = _dict.get('normalized') - if 'verb' in _dict: - args['verb'] = SemanticRolesVerb._from_dict(_dict.get('verb')) + if (text := _dict.get('text')) is not None: + args['text'] = text + if (normalized := _dict.get('normalized')) is not None: + args['normalized'] = normalized + if (verb := _dict.get('verb')) is not None: + args['verb'] = SemanticRolesVerb.from_dict(verb) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SemanticRolesResultAction object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: @@ -2710,156 +5107,215 @@ def _to_dict(self): if hasattr(self, 'normalized') and self.normalized is not None: _dict['normalized'] = self.normalized if hasattr(self, 'verb') and self.verb is not None: - _dict['verb'] = self.verb._to_dict() + if isinstance(self.verb, dict): + _dict['verb'] = self.verb + else: + _dict['verb'] = self.verb.to_dict() return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SemanticRolesResultAction object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SemanticRolesResultAction') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SemanticRolesResultAction') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SemanticRolesResultObject(object): +class SemanticRolesResultObject: """ The extracted object from the sentence. - :attr str text: (optional) Object text. - :attr list[SemanticRolesKeyword] keywords: (optional) An array of extracted keywords. + :param str text: (optional) Object text. + :param List[SemanticRolesKeyword] keywords: (optional) An array of extracted + keywords. """ - def __init__(self, text=None, keywords=None): + def __init__( + self, + *, + text: Optional[str] = None, + keywords: Optional[List['SemanticRolesKeyword']] = None, + ) -> None: """ Initialize a SemanticRolesResultObject object. :param str text: (optional) Object text. 
- :param list[SemanticRolesKeyword] keywords: (optional) An array of extracted - keywords. + :param List[SemanticRolesKeyword] keywords: (optional) An array of + extracted keywords. """ self.text = text self.keywords = keywords @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SemanticRolesResultObject': """Initialize a SemanticRolesResultObject object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'keywords' in _dict: + if (text := _dict.get('text')) is not None: + args['text'] = text + if (keywords := _dict.get('keywords')) is not None: args['keywords'] = [ - SemanticRolesKeyword._from_dict(x) - for x in (_dict.get('keywords')) + SemanticRolesKeyword.from_dict(v) for v in keywords ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SemanticRolesResultObject object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'keywords') and self.keywords is not None: - _dict['keywords'] = [x._to_dict() for x in self.keywords] + keywords_list = [] + for v in self.keywords: + if isinstance(v, dict): + keywords_list.append(v) + else: + keywords_list.append(v.to_dict()) + _dict['keywords'] = keywords_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SemanticRolesResultObject object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SemanticRolesResultObject') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SemanticRolesResultObject') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SemanticRolesResultSubject(object): +class SemanticRolesResultSubject: """ The extracted subject from the sentence. - :attr str text: (optional) Text that corresponds to the subject role. - :attr list[SemanticRolesEntity] entities: (optional) An array of extracted entities. - :attr list[SemanticRolesKeyword] keywords: (optional) An array of extracted keywords. + :param str text: (optional) Text that corresponds to the subject role. + :param List[SemanticRolesEntity] entities: (optional) An array of extracted + entities. + :param List[SemanticRolesKeyword] keywords: (optional) An array of extracted + keywords. """ - def __init__(self, text=None, entities=None, keywords=None): + def __init__( + self, + *, + text: Optional[str] = None, + entities: Optional[List['SemanticRolesEntity']] = None, + keywords: Optional[List['SemanticRolesKeyword']] = None, + ) -> None: """ Initialize a SemanticRolesResultSubject object. :param str text: (optional) Text that corresponds to the subject role. - :param list[SemanticRolesEntity] entities: (optional) An array of extracted - entities. - :param list[SemanticRolesKeyword] keywords: (optional) An array of extracted - keywords. + :param List[SemanticRolesEntity] entities: (optional) An array of extracted + entities. 
+ :param List[SemanticRolesKeyword] keywords: (optional) An array of + extracted keywords. """ self.text = text self.entities = entities self.keywords = keywords @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SemanticRolesResultSubject': """Initialize a SemanticRolesResultSubject object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'entities' in _dict: + if (text := _dict.get('text')) is not None: + args['text'] = text + if (entities := _dict.get('entities')) is not None: args['entities'] = [ - SemanticRolesEntity._from_dict(x) - for x in (_dict.get('entities')) + SemanticRolesEntity.from_dict(v) for v in entities ] - if 'keywords' in _dict: + if (keywords := _dict.get('keywords')) is not None: args['keywords'] = [ - SemanticRolesKeyword._from_dict(x) - for x in (_dict.get('keywords')) + SemanticRolesKeyword.from_dict(v) for v in keywords ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SemanticRolesResultSubject object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] + entities_list = [] + for v in self.entities: + if isinstance(v, dict): + entities_list.append(v) + else: + entities_list.append(v.to_dict()) + _dict['entities'] = entities_list if hasattr(self, 'keywords') and self.keywords is not None: - _dict['keywords'] = [x._to_dict() for x in self.keywords] + keywords_list = [] + for v in self.keywords: + if isinstance(v, dict): + keywords_list.append(v) + else: + keywords_list.append(v.to_dict()) + _dict['keywords'] = keywords_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SemanticRolesResultSubject object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SemanticRolesResultSubject') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SemanticRolesResultSubject') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SemanticRolesVerb(object): +class SemanticRolesVerb: """ SemanticRolesVerb. - :attr str text: (optional) The keyword text. - :attr str tense: (optional) Verb tense. + :param str text: (optional) The keyword text. + :param str tense: (optional) Verb tense. """ - def __init__(self, text=None, tense=None): + def __init__( + self, + *, + text: Optional[str] = None, + tense: Optional[str] = None, + ) -> None: """ Initialize a SemanticRolesVerb object. 
@@ -2870,16 +5326,21 @@ def __init__(self, text=None, tense=None): self.tense = tense @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SemanticRolesVerb': """Initialize a SemanticRolesVerb object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'tense' in _dict: - args['tense'] = _dict.get('tense') + if (text := _dict.get('text')) is not None: + args['text'] = text + if (tense := _dict.get('tense')) is not None: + args['tense'] = tense return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SemanticRolesVerb object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: @@ -2888,52 +5349,66 @@ def _to_dict(self): _dict['tense'] = self.tense return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SemanticRolesVerb object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SemanticRolesVerb') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SemanticRolesVerb') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SentenceResult(object): +class SentenceResult: """ SentenceResult. - :attr str text: (optional) The sentence. - :attr list[int] location: (optional) Character offsets indicating the beginning and - end of the sentence in the analyzed text. + :param str text: (optional) The sentence. + :param List[int] location: (optional) Character offsets indicating the beginning + and end of the sentence in the analyzed text. """ - def __init__(self, text=None, location=None): + def __init__( + self, + *, + text: Optional[str] = None, + location: Optional[List[int]] = None, + ) -> None: """ Initialize a SentenceResult object. :param str text: (optional) The sentence. - :param list[int] location: (optional) Character offsets indicating the beginning - and end of the sentence in the analyzed text. + :param List[int] location: (optional) Character offsets indicating the + beginning and end of the sentence in the analyzed text. 
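# Illustrative sketch of the serialization surface these models now share:
# from_dict()/to_dict() are the public names, while the older _from_dict()/
# _to_dict() remain as thin delegating aliases. The module path is assumed
# from the rest of this diff; the sample values are made up.
from ibm_watson.natural_language_understanding_v1 import SemanticRolesVerb

verb = SemanticRolesVerb.from_dict({'text': 'founded', 'tense': 'past'})
print(verb.to_dict())                                        # {'text': 'founded', 'tense': 'past'}
assert SemanticRolesVerb._from_dict(verb.to_dict()) == verb  # deprecated alias still works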
""" self.text = text self.location = location @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SentenceResult': """Initialize a SentenceResult object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'location' in _dict: - args['location'] = _dict.get('location') + if (text := _dict.get('text')) is not None: + args['text'] = text + if (location := _dict.get('location')) is not None: + args['location'] = location return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SentenceResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: @@ -2942,22 +5417,26 @@ def _to_dict(self): _dict['location'] = self.location return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SentenceResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SentenceResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SentenceResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SentimentOptions(object): +class SentimentOptions: """ Analyzes the general sentiment of your content or the sentiment toward specific target phrases. You can analyze sentiment for detected entities with `entities.sentiment` and @@ -2965,35 +5444,45 @@ class SentimentOptions(object): Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, Portuguese, Russian, Spanish. - :attr bool document: (optional) Set this to `false` to hide document-level sentiment - results. - :attr list[str] targets: (optional) Sentiment results will be returned for each target - string that is found in the document. + :param bool document: (optional) Set this to `false` to hide document-level + sentiment results. + :param List[str] targets: (optional) Sentiment results will be returned for each + target string that is found in the document. """ - def __init__(self, document=None, targets=None): + def __init__( + self, + *, + document: Optional[bool] = None, + targets: Optional[List[str]] = None, + ) -> None: """ Initialize a SentimentOptions object. :param bool document: (optional) Set this to `false` to hide document-level - sentiment results. - :param list[str] targets: (optional) Sentiment results will be returned for each - target string that is found in the document. + sentiment results. + :param List[str] targets: (optional) Sentiment results will be returned for + each target string that is found in the document. 
""" self.document = document self.targets = targets @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SentimentOptions': """Initialize a SentimentOptions object from a json dictionary.""" args = {} - if 'document' in _dict: - args['document'] = _dict.get('document') - if 'targets' in _dict: - args['targets'] = _dict.get('targets') + if (document := _dict.get('document')) is not None: + args['document'] = document + if (targets := _dict.get('targets')) is not None: + args['targets'] = targets return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SentimentOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'document') and self.document is not None: @@ -3002,164 +5491,220 @@ def _to_dict(self): _dict['targets'] = self.targets return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SentimentOptions object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SentimentOptions') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SentimentOptions') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SentimentResult(object): +class SentimentResult: """ The sentiment of the content. - :attr DocumentSentimentResults document: (optional) The document level sentiment. - :attr list[TargetedSentimentResults] targets: (optional) The targeted sentiment to - analyze. + :param DocumentSentimentResults document: (optional) The document level + sentiment. + :param List[TargetedSentimentResults] targets: (optional) The targeted sentiment + to analyze. """ - def __init__(self, document=None, targets=None): + def __init__( + self, + *, + document: Optional['DocumentSentimentResults'] = None, + targets: Optional[List['TargetedSentimentResults']] = None, + ) -> None: """ Initialize a SentimentResult object. - :param DocumentSentimentResults document: (optional) The document level sentiment. - :param list[TargetedSentimentResults] targets: (optional) The targeted sentiment - to analyze. + :param DocumentSentimentResults document: (optional) The document level + sentiment. + :param List[TargetedSentimentResults] targets: (optional) The targeted + sentiment to analyze. 
""" self.document = document self.targets = targets @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SentimentResult': """Initialize a SentimentResult object from a json dictionary.""" args = {} - if 'document' in _dict: - args['document'] = DocumentSentimentResults._from_dict( - _dict.get('document')) - if 'targets' in _dict: + if (document := _dict.get('document')) is not None: + args['document'] = DocumentSentimentResults.from_dict(document) + if (targets := _dict.get('targets')) is not None: args['targets'] = [ - TargetedSentimentResults._from_dict(x) - for x in (_dict.get('targets')) + TargetedSentimentResults.from_dict(v) for v in targets ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SentimentResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'document') and self.document is not None: - _dict['document'] = self.document._to_dict() + if isinstance(self.document, dict): + _dict['document'] = self.document + else: + _dict['document'] = self.document.to_dict() if hasattr(self, 'targets') and self.targets is not None: - _dict['targets'] = [x._to_dict() for x in self.targets] + targets_list = [] + for v in self.targets: + if isinstance(v, dict): + targets_list.append(v) + else: + targets_list.append(v.to_dict()) + _dict['targets'] = targets_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SentimentResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SentimentResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SentimentResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SyntaxOptions(object): +class SyntaxOptions: """ Returns tokens and sentences from the input text. - :attr SyntaxOptionsTokens tokens: (optional) Tokenization options. - :attr bool sentences: (optional) Set this to `true` to return sentence information. + :param SyntaxOptionsTokens tokens: (optional) Tokenization options. + :param bool sentences: (optional) Set this to `true` to return sentence + information. """ - def __init__(self, tokens=None, sentences=None): + def __init__( + self, + *, + tokens: Optional['SyntaxOptionsTokens'] = None, + sentences: Optional[bool] = None, + ) -> None: """ Initialize a SyntaxOptions object. :param SyntaxOptionsTokens tokens: (optional) Tokenization options. :param bool sentences: (optional) Set this to `true` to return sentence - information. + information. 
""" self.tokens = tokens self.sentences = sentences @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SyntaxOptions': """Initialize a SyntaxOptions object from a json dictionary.""" args = {} - if 'tokens' in _dict: - args['tokens'] = SyntaxOptionsTokens._from_dict(_dict.get('tokens')) - if 'sentences' in _dict: - args['sentences'] = _dict.get('sentences') + if (tokens := _dict.get('tokens')) is not None: + args['tokens'] = SyntaxOptionsTokens.from_dict(tokens) + if (sentences := _dict.get('sentences')) is not None: + args['sentences'] = sentences return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SyntaxOptions object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'tokens') and self.tokens is not None: - _dict['tokens'] = self.tokens._to_dict() + if isinstance(self.tokens, dict): + _dict['tokens'] = self.tokens + else: + _dict['tokens'] = self.tokens.to_dict() if hasattr(self, 'sentences') and self.sentences is not None: _dict['sentences'] = self.sentences return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SyntaxOptions object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SyntaxOptions') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SyntaxOptions') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SyntaxOptionsTokens(object): +class SyntaxOptionsTokens: """ Tokenization options. - :attr bool lemma: (optional) Set this to `true` to return the lemma for each token. - :attr bool part_of_speech: (optional) Set this to `true` to return the part of speech - for each token. + :param bool lemma: (optional) Set this to `true` to return the lemma for each + token. + :param bool part_of_speech: (optional) Set this to `true` to return the part of + speech for each token. """ - def __init__(self, lemma=None, part_of_speech=None): + def __init__( + self, + *, + lemma: Optional[bool] = None, + part_of_speech: Optional[bool] = None, + ) -> None: """ Initialize a SyntaxOptionsTokens object. - :param bool lemma: (optional) Set this to `true` to return the lemma for each - token. - :param bool part_of_speech: (optional) Set this to `true` to return the part of - speech for each token. + :param bool lemma: (optional) Set this to `true` to return the lemma for + each token. + :param bool part_of_speech: (optional) Set this to `true` to return the + part of speech for each token. 
""" self.lemma = lemma self.part_of_speech = part_of_speech @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SyntaxOptionsTokens': """Initialize a SyntaxOptionsTokens object from a json dictionary.""" args = {} - if 'lemma' in _dict: - args['lemma'] = _dict.get('lemma') - if 'part_of_speech' in _dict: - args['part_of_speech'] = _dict.get('part_of_speech') + if (lemma := _dict.get('lemma')) is not None: + args['lemma'] = lemma + if (part_of_speech := _dict.get('part_of_speech')) is not None: + args['part_of_speech'] = part_of_speech return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SyntaxOptionsTokens object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'lemma') and self.lemma is not None: @@ -3168,158 +5713,214 @@ def _to_dict(self): _dict['part_of_speech'] = self.part_of_speech return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SyntaxOptionsTokens object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SyntaxOptionsTokens') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SyntaxOptionsTokens') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SyntaxResult(object): +class SyntaxResult: """ Tokens and sentences returned from syntax analysis. - :attr list[TokenResult] tokens: (optional) - :attr list[SentenceResult] sentences: (optional) + :param List[TokenResult] tokens: (optional) + :param List[SentenceResult] sentences: (optional) """ - def __init__(self, tokens=None, sentences=None): + def __init__( + self, + *, + tokens: Optional[List['TokenResult']] = None, + sentences: Optional[List['SentenceResult']] = None, + ) -> None: """ Initialize a SyntaxResult object. 
- :param list[TokenResult] tokens: (optional) - :param list[SentenceResult] sentences: (optional) + :param List[TokenResult] tokens: (optional) + :param List[SentenceResult] sentences: (optional) """ self.tokens = tokens self.sentences = sentences @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SyntaxResult': """Initialize a SyntaxResult object from a json dictionary.""" args = {} - if 'tokens' in _dict: - args['tokens'] = [ - TokenResult._from_dict(x) for x in (_dict.get('tokens')) - ] - if 'sentences' in _dict: - args['sentences'] = [ - SentenceResult._from_dict(x) for x in (_dict.get('sentences')) - ] + if (tokens := _dict.get('tokens')) is not None: + args['tokens'] = [TokenResult.from_dict(v) for v in tokens] + if (sentences := _dict.get('sentences')) is not None: + args['sentences'] = [SentenceResult.from_dict(v) for v in sentences] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SyntaxResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'tokens') and self.tokens is not None: - _dict['tokens'] = [x._to_dict() for x in self.tokens] + tokens_list = [] + for v in self.tokens: + if isinstance(v, dict): + tokens_list.append(v) + else: + tokens_list.append(v.to_dict()) + _dict['tokens'] = tokens_list if hasattr(self, 'sentences') and self.sentences is not None: - _dict['sentences'] = [x._to_dict() for x in self.sentences] + sentences_list = [] + for v in self.sentences: + if isinstance(v, dict): + sentences_list.append(v) + else: + sentences_list.append(v.to_dict()) + _dict['sentences'] = sentences_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SyntaxResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SyntaxResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SyntaxResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class TargetedEmotionResults(object): +class TargetedEmotionResults: """ Emotion results for a specified target. - :attr str text: (optional) Targeted text. - :attr EmotionScores emotion: (optional) The emotion results for the target. + :param str text: (optional) Targeted text. + :param EmotionScores emotion: (optional) The emotion results for the target. """ - def __init__(self, text=None, emotion=None): + def __init__( + self, + *, + text: Optional[str] = None, + emotion: Optional['EmotionScores'] = None, + ) -> None: """ Initialize a TargetedEmotionResults object. :param str text: (optional) Targeted text. - :param EmotionScores emotion: (optional) The emotion results for the target. + :param EmotionScores emotion: (optional) The emotion results for the + target. 
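# Illustrative: SyntaxResult.from_dict recursively builds the token and
# sentence models from a raw syntax fragment, and to_dict() round-trips it.
# The fragment below is made up.
from ibm_watson.natural_language_understanding_v1 import SyntaxResult

raw = {
    'tokens': [{'text': 'Hello', 'part_of_speech': 'INTJ', 'location': [0, 5]}],
    'sentences': [{'text': 'Hello world.', 'location': [0, 12]}],
}
syntax_result = SyntaxResult.from_dict(raw)
print(syntax_result.tokens[0].part_of_speech)   # INTJ
assert syntax_result.to_dict() == raw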
""" self.text = text self.emotion = emotion @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'TargetedEmotionResults': """Initialize a TargetedEmotionResults object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'emotion' in _dict: - args['emotion'] = EmotionScores._from_dict(_dict.get('emotion')) + if (text := _dict.get('text')) is not None: + args['text'] = text + if (emotion := _dict.get('emotion')) is not None: + args['emotion'] = EmotionScores.from_dict(emotion) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a TargetedEmotionResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'emotion') and self.emotion is not None: - _dict['emotion'] = self.emotion._to_dict() + if isinstance(self.emotion, dict): + _dict['emotion'] = self.emotion + else: + _dict['emotion'] = self.emotion.to_dict() return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this TargetedEmotionResults object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'TargetedEmotionResults') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'TargetedEmotionResults') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class TargetedSentimentResults(object): +class TargetedSentimentResults: """ TargetedSentimentResults. - :attr str text: (optional) Targeted text. - :attr float score: (optional) Sentiment score from -1 (negative) to 1 (positive). + :param str text: (optional) Targeted text. + :param float score: (optional) Sentiment score from -1 (negative) to 1 + (positive). """ - def __init__(self, text=None, score=None): + def __init__( + self, + *, + text: Optional[str] = None, + score: Optional[float] = None, + ) -> None: """ Initialize a TargetedSentimentResults object. :param str text: (optional) Targeted text. - :param float score: (optional) Sentiment score from -1 (negative) to 1 (positive). + :param float score: (optional) Sentiment score from -1 (negative) to 1 + (positive). 
""" self.text = text self.score = score @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'TargetedSentimentResults': """Initialize a TargetedSentimentResults object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'score' in _dict: - args['score'] = _dict.get('score') + if (text := _dict.get('text')) is not None: + args['text'] = text + if (score := _dict.get('score')) is not None: + args['score'] = score return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a TargetedSentimentResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: @@ -3328,51 +5929,58 @@ def _to_dict(self): _dict['score'] = self.score return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this TargetedSentimentResults object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'TargetedSentimentResults') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'TargetedSentimentResults') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class TokenResult(object): +class TokenResult: """ TokenResult. - :attr str text: (optional) The token as it appears in the analyzed text. - :attr str part_of_speech: (optional) The part of speech of the token. For descriptions - of the values, see [Universal Dependencies POS - tags](https://universaldependencies.org/u/pos/). - :attr list[int] location: (optional) Character offsets indicating the beginning and - end of the token in the analyzed text. - :attr str lemma: (optional) The - [lemma](https://wikipedia.org/wiki/Lemma_%28morphology%29) of the token. + :param str text: (optional) The token as it appears in the analyzed text. + :param str part_of_speech: (optional) The part of speech of the token. For more + information about the values, see [Universal Dependencies POS + tags](https://universaldependencies.org/u/pos/). + :param List[int] location: (optional) Character offsets indicating the beginning + and end of the token in the analyzed text. + :param str lemma: (optional) The + [lemma](https://wikipedia.org/wiki/Lemma_%28morphology%29) of the token. """ - def __init__(self, - text=None, - part_of_speech=None, - location=None, - lemma=None): + def __init__( + self, + *, + text: Optional[str] = None, + part_of_speech: Optional[str] = None, + location: Optional[List[int]] = None, + lemma: Optional[str] = None, + ) -> None: """ Initialize a TokenResult object. :param str text: (optional) The token as it appears in the analyzed text. :param str part_of_speech: (optional) The part of speech of the token. For - descriptions of the values, see [Universal Dependencies POS - tags](https://universaldependencies.org/u/pos/). - :param list[int] location: (optional) Character offsets indicating the beginning - and end of the token in the analyzed text. 
+ more information about the values, see [Universal Dependencies POS + tags](https://universaldependencies.org/u/pos/). + :param List[int] location: (optional) Character offsets indicating the + beginning and end of the token in the analyzed text. :param str lemma: (optional) The - [lemma](https://wikipedia.org/wiki/Lemma_%28morphology%29) of the token. + [lemma](https://wikipedia.org/wiki/Lemma_%28morphology%29) of the token. """ self.text = text self.part_of_speech = part_of_speech @@ -3380,20 +5988,25 @@ def __init__(self, self.lemma = lemma @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'TokenResult': """Initialize a TokenResult object from a json dictionary.""" args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - if 'part_of_speech' in _dict: - args['part_of_speech'] = _dict.get('part_of_speech') - if 'location' in _dict: - args['location'] = _dict.get('location') - if 'lemma' in _dict: - args['lemma'] = _dict.get('lemma') + if (text := _dict.get('text')) is not None: + args['text'] = text + if (part_of_speech := _dict.get('part_of_speech')) is not None: + args['part_of_speech'] = part_of_speech + if (location := _dict.get('location')) is not None: + args['location'] = location + if (lemma := _dict.get('lemma')) is not None: + args['lemma'] = lemma return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a TokenResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: @@ -3406,16 +6019,44 @@ def _to_dict(self): _dict['lemma'] = self.lemma return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this TokenResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'TokenResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'TokenResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + + class PartOfSpeechEnum(str, Enum): + """ + The part of speech of the token. For more information about the values, see + [Universal Dependencies POS tags](https://universaldependencies.org/u/pos/). + """ + + ADJ = 'ADJ' + ADP = 'ADP' + ADV = 'ADV' + AUX = 'AUX' + CCONJ = 'CCONJ' + DET = 'DET' + INTJ = 'INTJ' + NOUN = 'NOUN' + NUM = 'NUM' + PART = 'PART' + PRON = 'PRON' + PROPN = 'PROPN' + PUNCT = 'PUNCT' + SCONJ = 'SCONJ' + SYM = 'SYM' + VERB = 'VERB' + X = 'X' diff --git a/ibm_watson/personality_insights_v3.py b/ibm_watson/personality_insights_v3.py deleted file mode 100644 index 822096fc1..000000000 --- a/ibm_watson/personality_insights_v3.py +++ /dev/null @@ -1,1126 +0,0 @@ -# coding: utf-8 - -# Copyright 2018 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
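# Illustrative: the new TokenResult.PartOfSpeechEnum mirrors the Universal
# Dependencies POS tags; because it subclasses str, its members compare equal
# to the plain strings carried in TokenResult.part_of_speech. The token values
# are made up.
from ibm_watson.natural_language_understanding_v1 import TokenResult

token = TokenResult.from_dict({'text': 'run', 'part_of_speech': 'VERB', 'location': [0, 3]})
if token.part_of_speech == TokenResult.PartOfSpeechEnum.VERB:
    print('verb:', token.text)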
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -The IBM Watson™ Personality Insights service enables applications to derive insights -from social media, enterprise data, or other digital communications. The service uses -linguistic analytics to infer individuals' intrinsic personality characteristics, -including Big Five, Needs, and Values, from digital communications such as email, text -messages, tweets, and forum posts. -The service can automatically infer, from potentially noisy social media, portraits of -individuals that reflect their personality characteristics. The service can infer -consumption preferences based on the results of its analysis and, for JSON content that is -timestamped, can report temporal behavior. -* For information about the meaning of the models that the service uses to describe -personality characteristics, see [Personality -models](https://cloud.ibm.com/docs/services/personality-insights/models.html). -* For information about the meaning of the consumption preferences, see [Consumption -preferences](https://cloud.ibm.com/docs/services/personality-insights/preferences.html). -**Note:** Request logging is disabled for the Personality Insights service. Regardless of -whether you set the `X-Watson-Learning-Opt-Out` request header, the service does not log -or retain data from requests and responses. -""" - -from __future__ import absolute_import - -import json -from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService - -############################################################################## -# Service -############################################################################## - - -class PersonalityInsightsV3(BaseService): - """The Personality Insights V3 service.""" - - default_url = 'https://gateway.watsonplatform.net/personality-insights/api' - - def __init__( - self, - version, - url=default_url, - username=None, - password=None, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): - """ - Construct a new client for the Personality Insights service. - - :param str version: The API version date to use with the service, in - "YYYY-MM-DD" format. Whenever the API is changed in a backwards - incompatible way, a new minor version of the API is released. - The service uses the API version for the date you specify, or - the most recent version before that date. Note that you should - not programmatically specify the current date at runtime, in - case the API has been updated since your application's release. - Instead, specify a version date that is compatible with your - application, and don't change it until your application is - ready for a later version. - - :param str url: The base url to use when contacting the service (e.g. - "https://gateway.watsonplatform.net/personality-insights/api/personality-insights/api"). - The base url may differ between IBM Cloud regions. - - :param str username: The username used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. 
When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str password: The password used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. - - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. Defaults to - 'https://iam.cloud.ibm.com/identity/token'. - """ - - BaseService.__init__( - self, - vcap_services_name='personality_insights', - url=url, - username=username, - password=password, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Personality Insights') - self.version = version - - ######################### - # Methods - ######################### - - def profile(self, - content, - accept, - content_language=None, - accept_language=None, - raw_scores=None, - csv_headers=None, - consumption_preferences=None, - content_type=None, - **kwargs): - """ - Get profile. - - Generates a personality profile for the author of the input text. The service - accepts a maximum of 20 MB of input content, but it requires much less text to - produce an accurate profile. The service can analyze text in Arabic, English, - Japanese, Korean, or Spanish. It can return its results in a variety of languages. - **See also:** - * [Requesting a - profile](https://cloud.ibm.com/docs/services/personality-insights/input.html) - * [Providing sufficient - input](https://cloud.ibm.com/docs/services/personality-insights/input.html#sufficient) - ### Content types - You can provide input content as plain text (`text/plain`), HTML (`text/html`), - or JSON (`application/json`) by specifying the **Content-Type** parameter. The - default is `text/plain`. - * Per the JSON specification, the default character encoding for JSON content is - effectively always UTF-8. - * Per the HTTP specification, the default encoding for plain text and HTML is - ISO-8859-1 (effectively, the ASCII character set). - When specifying a content type of plain text or HTML, include the `charset` - parameter to indicate the character encoding of the input text; for example, - `Content-Type: text/plain;charset=utf-8`. - **See also:** [Specifying request and response - formats](https://cloud.ibm.com/docs/services/personality-insights/input.html#formats) - ### Accept types - You must request a response as JSON (`application/json`) or comma-separated - values (`text/csv`) by specifying the **Accept** parameter. CSV output includes a - fixed number of columns. Set the **csv_headers** parameter to `true` to request - optional column headers for CSV output. - **See also:** - * [Understanding a JSON - profile](https://cloud.ibm.com/docs/services/personality-insights/output.html) - * [Understanding a CSV - profile](https://cloud.ibm.com/docs/services/personality-insights/output-csv.html). 
- - :param Content content: A maximum of 20 MB of content to analyze, though the - service requires much less text; for more information, see [Providing sufficient - input](https://cloud.ibm.com/docs/services/personality-insights/input.html#sufficient). - For JSON input, provide an object of type `Content`. - :param str accept: The type of the response. For more information, see **Accept - types** in the method description. - :param str content_language: The language of the input text for the request: - Arabic, English, Japanese, Korean, or Spanish. Regional variants are treated as - their parent language; for example, `en-US` is interpreted as `en`. - The effect of the **Content-Language** parameter depends on the **Content-Type** - parameter. When **Content-Type** is `text/plain` or `text/html`, - **Content-Language** is the only way to specify the language. When - **Content-Type** is `application/json`, **Content-Language** overrides a language - specified with the `language` parameter of a `ContentItem` object, and content - items that specify a different language are ignored; omit this parameter to base - the language on the specification of the content items. You can specify any - combination of languages for **Content-Language** and **Accept-Language**. - :param str accept_language: The desired language of the response. For - two-character arguments, regional variants are treated as their parent language; - for example, `en-US` is interpreted as `en`. You can specify any combination of - languages for the input and response content. - :param bool raw_scores: Indicates whether a raw score in addition to a normalized - percentile is returned for each characteristic; raw scores are not compared with a - sample population. By default, only normalized percentiles are returned. - :param bool csv_headers: Indicates whether column labels are returned with a CSV - response. By default, no column labels are returned. Applies only when the - response type is CSV (`text/csv`). - :param bool consumption_preferences: Indicates whether consumption preferences are - returned with the results. By default, no consumption preferences are returned. - :param str content_type: The type of the input. For more information, see - **Content types** in the method description. - Default: `text/plain`. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if content is None: - raise ValueError('content must be provided') - if accept is None: - raise ValueError('accept must be provided') - if isinstance(content, Content): - content = self._convert_model(content, Content) - - headers = { - 'Accept': accept, - 'Content-Language': content_language, - 'Accept-Language': accept_language, - 'Content-Type': content_type - } - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('personality_insights', 'V3', 'profile') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'raw_scores': raw_scores, - 'csv_headers': csv_headers, - 'consumption_preferences': consumption_preferences - } - - if content_type == 'application/json' and isinstance(content, dict): - data = json.dumps(content) - else: - data = content - - url = '/v3/profile' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - data=data, - accept_json=(accept is None or accept == 'application/json')) - return response - - -############################################################################## -# Models -############################################################################## - - -class Behavior(object): - """ - Behavior. - - :attr str trait_id: The unique, non-localized identifier of the characteristic to - which the results pertain. IDs have the form `behavior_{value}`. - :attr str name: The user-visible, localized name of the characteristic. - :attr str category: The category of the characteristic: `behavior` for temporal data. - :attr float percentage: For JSON content that is timestamped, the percentage of - timestamped input data that occurred during that day of the week or hour of the day. - The range is 0 to 1. - """ - - def __init__(self, trait_id, name, category, percentage): - """ - Initialize a Behavior object. - - :param str trait_id: The unique, non-localized identifier of the characteristic to - which the results pertain. IDs have the form `behavior_{value}`. - :param str name: The user-visible, localized name of the characteristic. - :param str category: The category of the characteristic: `behavior` for temporal - data. - :param float percentage: For JSON content that is timestamped, the percentage of - timestamped input data that occurred during that day of the week or hour of the - day. The range is 0 to 1. 
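# Illustrative: how a profile() request against the removed PersonalityInsightsV3
# client was typically issued -- JSON accept type, plain-text content, optional
# raw scores and consumption preferences. The version date, credentials, and
# input text are placeholders.
from ibm_watson import PersonalityInsightsV3

personality_insights = PersonalityInsightsV3(
    version='2017-10-13',
    iam_apikey='YOUR_APIKEY',
)
profile = personality_insights.profile(
    content='...at least a few hundred words of input text...',
    accept='application/json',
    content_type='text/plain',
    raw_scores=True,
    consumption_preferences=True,
).get_result()
print(profile['word_count'], len(profile['personality']))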
- """ - self.trait_id = trait_id - self.name = name - self.category = category - self.percentage = percentage - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Behavior object from a json dictionary.""" - args = {} - if 'trait_id' in _dict: - args['trait_id'] = _dict.get('trait_id') - else: - raise ValueError( - 'Required property \'trait_id\' not present in Behavior JSON') - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in Behavior JSON') - if 'category' in _dict: - args['category'] = _dict.get('category') - else: - raise ValueError( - 'Required property \'category\' not present in Behavior JSON') - if 'percentage' in _dict: - args['percentage'] = _dict.get('percentage') - else: - raise ValueError( - 'Required property \'percentage\' not present in Behavior JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'trait_id') and self.trait_id is not None: - _dict['trait_id'] = self.trait_id - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'category') and self.category is not None: - _dict['category'] = self.category - if hasattr(self, 'percentage') and self.percentage is not None: - _dict['percentage'] = self.percentage - return _dict - - def __str__(self): - """Return a `str` version of this Behavior object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ConsumptionPreferences(object): - """ - ConsumptionPreferences. - - :attr str consumption_preference_id: The unique, non-localized identifier of the - consumption preference to which the results pertain. IDs have the form - `consumption_preferences_{preference}`. - :attr str name: The user-visible, localized name of the consumption preference. - :attr float score: The score for the consumption preference: - * `0.0`: Unlikely - * `0.5`: Neutral - * `1.0`: Likely - The scores for some preferences are binary and do not allow a neutral value. The score - is an indication of preference based on the results inferred from the input text, not - a normalized percentile. - """ - - def __init__(self, consumption_preference_id, name, score): - """ - Initialize a ConsumptionPreferences object. - - :param str consumption_preference_id: The unique, non-localized identifier of the - consumption preference to which the results pertain. IDs have the form - `consumption_preferences_{preference}`. - :param str name: The user-visible, localized name of the consumption preference. - :param float score: The score for the consumption preference: - * `0.0`: Unlikely - * `0.5`: Neutral - * `1.0`: Likely - The scores for some preferences are binary and do not allow a neutral value. The - score is an indication of preference based on the results inferred from the input - text, not a normalized percentile. 
- """ - self.consumption_preference_id = consumption_preference_id - self.name = name - self.score = score - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ConsumptionPreferences object from a json dictionary.""" - args = {} - if 'consumption_preference_id' in _dict: - args['consumption_preference_id'] = _dict.get( - 'consumption_preference_id') - else: - raise ValueError( - 'Required property \'consumption_preference_id\' not present in ConsumptionPreferences JSON' - ) - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in ConsumptionPreferences JSON' - ) - if 'score' in _dict: - args['score'] = _dict.get('score') - else: - raise ValueError( - 'Required property \'score\' not present in ConsumptionPreferences JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'consumption_preference_id' - ) and self.consumption_preference_id is not None: - _dict['consumption_preference_id'] = self.consumption_preference_id - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'score') and self.score is not None: - _dict['score'] = self.score - return _dict - - def __str__(self): - """Return a `str` version of this ConsumptionPreferences object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ConsumptionPreferencesCategory(object): - """ - ConsumptionPreferencesCategory. - - :attr str consumption_preference_category_id: The unique, non-localized identifier of - the consumption preferences category to which the results pertain. IDs have the form - `consumption_preferences_{category}`. - :attr str name: The user-visible name of the consumption preferences category. - :attr list[ConsumptionPreferences] consumption_preferences: Detailed results inferred - from the input text for the individual preferences of the category. - """ - - def __init__(self, consumption_preference_category_id, name, - consumption_preferences): - """ - Initialize a ConsumptionPreferencesCategory object. - - :param str consumption_preference_category_id: The unique, non-localized - identifier of the consumption preferences category to which the results pertain. - IDs have the form `consumption_preferences_{category}`. - :param str name: The user-visible name of the consumption preferences category. - :param list[ConsumptionPreferences] consumption_preferences: Detailed results - inferred from the input text for the individual preferences of the category. 
- """ - self.consumption_preference_category_id = consumption_preference_category_id - self.name = name - self.consumption_preferences = consumption_preferences - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ConsumptionPreferencesCategory object from a json dictionary.""" - args = {} - if 'consumption_preference_category_id' in _dict: - args['consumption_preference_category_id'] = _dict.get( - 'consumption_preference_category_id') - else: - raise ValueError( - 'Required property \'consumption_preference_category_id\' not present in ConsumptionPreferencesCategory JSON' - ) - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in ConsumptionPreferencesCategory JSON' - ) - if 'consumption_preferences' in _dict: - args['consumption_preferences'] = [ - ConsumptionPreferences._from_dict(x) - for x in (_dict.get('consumption_preferences')) - ] - else: - raise ValueError( - 'Required property \'consumption_preferences\' not present in ConsumptionPreferencesCategory JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'consumption_preference_category_id' - ) and self.consumption_preference_category_id is not None: - _dict[ - 'consumption_preference_category_id'] = self.consumption_preference_category_id - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'consumption_preferences' - ) and self.consumption_preferences is not None: - _dict['consumption_preferences'] = [ - x._to_dict() for x in self.consumption_preferences - ] - return _dict - - def __str__(self): - """Return a `str` version of this ConsumptionPreferencesCategory object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Content(object): - """ - Content. - - :attr list[ContentItem] content_items: An array of `ContentItem` objects that provides - the text that is to be analyzed. - """ - - def __init__(self, content_items): - """ - Initialize a Content object. - - :param list[ContentItem] content_items: An array of `ContentItem` objects that - provides the text that is to be analyzed. 
- """ - self.content_items = content_items - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Content object from a json dictionary.""" - args = {} - if 'contentItems' in _dict: - args['content_items'] = [ - ContentItem._from_dict(x) for x in (_dict.get('contentItems')) - ] - else: - raise ValueError( - 'Required property \'contentItems\' not present in Content JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'content_items') and self.content_items is not None: - _dict['contentItems'] = [x._to_dict() for x in self.content_items] - return _dict - - def __str__(self): - """Return a `str` version of this Content object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ContentItem(object): - """ - ContentItem. - - :attr str content: The content that is to be analyzed. The service supports up to 20 - MB of content for all `ContentItem` objects combined. - :attr str id: (optional) A unique identifier for this content item. - :attr int created: (optional) A timestamp that identifies when this content was - created. Specify a value in milliseconds since the UNIX Epoch (January 1, 1970, at - 0:00 UTC). Required only for results that include temporal behavior data. - :attr int updated: (optional) A timestamp that identifies when this content was last - updated. Specify a value in milliseconds since the UNIX Epoch (January 1, 1970, at - 0:00 UTC). Required only for results that include temporal behavior data. - :attr str contenttype: (optional) The MIME type of the content. The default is plain - text. The tags are stripped from HTML content before it is analyzed; plain text is - processed as submitted. - :attr str language: (optional) The language identifier (two-letter ISO 639-1 - identifier) for the language of the content item. The default is `en` (English). - Regional variants are treated as their parent language; for example, `en-US` is - interpreted as `en`. A language specified with the **Content-Type** parameter - overrides the value of this parameter; any content items that specify a different - language are ignored. Omit the **Content-Type** parameter to base the language on the - most prevalent specification among the content items; again, content items that - specify a different language are ignored. You can specify any combination of languages - for the input and response content. - :attr str parentid: (optional) The unique ID of the parent content item for this item. - Used to identify hierarchical relationships between posts/replies, messages/replies, - and so on. - :attr bool reply: (optional) Indicates whether this content item is a reply to another - content item. - :attr bool forward: (optional) Indicates whether this content item is a - forwarded/copied version of another content item. - """ - - def __init__(self, - content, - id=None, - created=None, - updated=None, - contenttype=None, - language=None, - parentid=None, - reply=None, - forward=None): - """ - Initialize a ContentItem object. - - :param str content: The content that is to be analyzed. The service supports up to - 20 MB of content for all `ContentItem` objects combined. 
- :param str id: (optional) A unique identifier for this content item. - :param int created: (optional) A timestamp that identifies when this content was - created. Specify a value in milliseconds since the UNIX Epoch (January 1, 1970, at - 0:00 UTC). Required only for results that include temporal behavior data. - :param int updated: (optional) A timestamp that identifies when this content was - last updated. Specify a value in milliseconds since the UNIX Epoch (January 1, - 1970, at 0:00 UTC). Required only for results that include temporal behavior data. - :param str contenttype: (optional) The MIME type of the content. The default is - plain text. The tags are stripped from HTML content before it is analyzed; plain - text is processed as submitted. - :param str language: (optional) The language identifier (two-letter ISO 639-1 - identifier) for the language of the content item. The default is `en` (English). - Regional variants are treated as their parent language; for example, `en-US` is - interpreted as `en`. A language specified with the **Content-Type** parameter - overrides the value of this parameter; any content items that specify a different - language are ignored. Omit the **Content-Type** parameter to base the language on - the most prevalent specification among the content items; again, content items - that specify a different language are ignored. You can specify any combination of - languages for the input and response content. - :param str parentid: (optional) The unique ID of the parent content item for this - item. Used to identify hierarchical relationships between posts/replies, - messages/replies, and so on. - :param bool reply: (optional) Indicates whether this content item is a reply to - another content item. - :param bool forward: (optional) Indicates whether this content item is a - forwarded/copied version of another content item. 
- """ - self.content = content - self.id = id - self.created = created - self.updated = updated - self.contenttype = contenttype - self.language = language - self.parentid = parentid - self.reply = reply - self.forward = forward - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ContentItem object from a json dictionary.""" - args = {} - if 'content' in _dict: - args['content'] = _dict.get('content') - else: - raise ValueError( - 'Required property \'content\' not present in ContentItem JSON') - if 'id' in _dict: - args['id'] = _dict.get('id') - if 'created' in _dict: - args['created'] = _dict.get('created') - if 'updated' in _dict: - args['updated'] = _dict.get('updated') - if 'contenttype' in _dict: - args['contenttype'] = _dict.get('contenttype') - if 'language' in _dict: - args['language'] = _dict.get('language') - if 'parentid' in _dict: - args['parentid'] = _dict.get('parentid') - if 'reply' in _dict: - args['reply'] = _dict.get('reply') - if 'forward' in _dict: - args['forward'] = _dict.get('forward') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'content') and self.content is not None: - _dict['content'] = self.content - if hasattr(self, 'id') and self.id is not None: - _dict['id'] = self.id - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = self.created - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = self.updated - if hasattr(self, 'contenttype') and self.contenttype is not None: - _dict['contenttype'] = self.contenttype - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language - if hasattr(self, 'parentid') and self.parentid is not None: - _dict['parentid'] = self.parentid - if hasattr(self, 'reply') and self.reply is not None: - _dict['reply'] = self.reply - if hasattr(self, 'forward') and self.forward is not None: - _dict['forward'] = self.forward - return _dict - - def __str__(self): - """Return a `str` version of this ContentItem object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Profile(object): - """ - Profile. - - :attr str processed_language: The language model that was used to process the input. - :attr int word_count: The number of words from the input that were used to produce the - profile. - :attr str word_count_message: (optional) When guidance is appropriate, a string that - provides a message that indicates the number of words found and where that value falls - in the range of required or suggested number of words. - :attr list[Trait] personality: A recursive array of `Trait` objects that provides - detailed results for the Big Five personality characteristics (dimensions and facets) - inferred from the input text. - :attr list[Trait] needs: Detailed results for the Needs characteristics inferred from - the input text. - :attr list[Trait] values: Detailed results for the Values characteristics inferred - from the input text. - :attr list[Behavior] behavior: (optional) For JSON content that is timestamped, - detailed results about the social behavior disclosed by the input in terms of temporal - characteristics. 
The results include information about the distribution of the content - over the days of the week and the hours of the day. - :attr list[ConsumptionPreferencesCategory] consumption_preferences: (optional) If the - **consumption_preferences** parameter is `true`, detailed results for each category of - consumption preferences. Each element of the array provides information inferred from - the input text for the individual preferences of that category. - :attr list[Warning] warnings: Warning messages associated with the input text - submitted with the request. The array is empty if the input generated no warnings. - """ - - def __init__(self, - processed_language, - word_count, - personality, - needs, - values, - warnings, - word_count_message=None, - behavior=None, - consumption_preferences=None): - """ - Initialize a Profile object. - - :param str processed_language: The language model that was used to process the - input. - :param int word_count: The number of words from the input that were used to - produce the profile. - :param list[Trait] personality: A recursive array of `Trait` objects that provides - detailed results for the Big Five personality characteristics (dimensions and - facets) inferred from the input text. - :param list[Trait] needs: Detailed results for the Needs characteristics inferred - from the input text. - :param list[Trait] values: Detailed results for the Values characteristics - inferred from the input text. - :param list[Warning] warnings: Warning messages associated with the input text - submitted with the request. The array is empty if the input generated no warnings. - :param str word_count_message: (optional) When guidance is appropriate, a string - that provides a message that indicates the number of words found and where that - value falls in the range of required or suggested number of words. - :param list[Behavior] behavior: (optional) For JSON content that is timestamped, - detailed results about the social behavior disclosed by the input in terms of - temporal characteristics. The results include information about the distribution - of the content over the days of the week and the hours of the day. - :param list[ConsumptionPreferencesCategory] consumption_preferences: (optional) If - the **consumption_preferences** parameter is `true`, detailed results for each - category of consumption preferences. Each element of the array provides - information inferred from the input text for the individual preferences of that - category. 
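As a rough illustration of how callers typically consumed the `Profile` structure documented above before this code was removed, here is a sketch that walks the parsed JSON response; `profile` stands for the dictionary returned by the service, and the field names are exactly those listed in the docstring:

```python
# Sketch: navigating a Personality Insights profile dictionary.
def summarize_profile(profile: dict) -> None:
    print(f"Processed language: {profile['processed_language']}, "
          f"words used: {profile['word_count']}")

    for trait in profile["personality"]:          # Big Five dimensions
        print(f"{trait['name']}: {trait['percentile']:.2f}")
        for facet in trait.get("children", []):   # facets nested under each dimension
            print(f"  {facet['name']}: {facet['percentile']:.2f}")

    for warning in profile.get("warnings", []):   # e.g. WORD_COUNT_MESSAGE for short input
        print(f"Warning {warning['warning_id']}: {warning['message']}")
```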
- """ - self.processed_language = processed_language - self.word_count = word_count - self.word_count_message = word_count_message - self.personality = personality - self.needs = needs - self.values = values - self.behavior = behavior - self.consumption_preferences = consumption_preferences - self.warnings = warnings - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Profile object from a json dictionary.""" - args = {} - if 'processed_language' in _dict: - args['processed_language'] = _dict.get('processed_language') - else: - raise ValueError( - 'Required property \'processed_language\' not present in Profile JSON' - ) - if 'word_count' in _dict: - args['word_count'] = _dict.get('word_count') - else: - raise ValueError( - 'Required property \'word_count\' not present in Profile JSON') - if 'word_count_message' in _dict: - args['word_count_message'] = _dict.get('word_count_message') - if 'personality' in _dict: - args['personality'] = [ - Trait._from_dict(x) for x in (_dict.get('personality')) - ] - else: - raise ValueError( - 'Required property \'personality\' not present in Profile JSON') - if 'needs' in _dict: - args['needs'] = [Trait._from_dict(x) for x in (_dict.get('needs'))] - else: - raise ValueError( - 'Required property \'needs\' not present in Profile JSON') - if 'values' in _dict: - args['values'] = [ - Trait._from_dict(x) for x in (_dict.get('values')) - ] - else: - raise ValueError( - 'Required property \'values\' not present in Profile JSON') - if 'behavior' in _dict: - args['behavior'] = [ - Behavior._from_dict(x) for x in (_dict.get('behavior')) - ] - if 'consumption_preferences' in _dict: - args['consumption_preferences'] = [ - ConsumptionPreferencesCategory._from_dict(x) - for x in (_dict.get('consumption_preferences')) - ] - if 'warnings' in _dict: - args['warnings'] = [ - Warning._from_dict(x) for x in (_dict.get('warnings')) - ] - else: - raise ValueError( - 'Required property \'warnings\' not present in Profile JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr( - self, - 'processed_language') and self.processed_language is not None: - _dict['processed_language'] = self.processed_language - if hasattr(self, 'word_count') and self.word_count is not None: - _dict['word_count'] = self.word_count - if hasattr( - self, - 'word_count_message') and self.word_count_message is not None: - _dict['word_count_message'] = self.word_count_message - if hasattr(self, 'personality') and self.personality is not None: - _dict['personality'] = [x._to_dict() for x in self.personality] - if hasattr(self, 'needs') and self.needs is not None: - _dict['needs'] = [x._to_dict() for x in self.needs] - if hasattr(self, 'values') and self.values is not None: - _dict['values'] = [x._to_dict() for x in self.values] - if hasattr(self, 'behavior') and self.behavior is not None: - _dict['behavior'] = [x._to_dict() for x in self.behavior] - if hasattr(self, 'consumption_preferences' - ) and self.consumption_preferences is not None: - _dict['consumption_preferences'] = [ - x._to_dict() for x in self.consumption_preferences - ] - if hasattr(self, 'warnings') and self.warnings is not None: - _dict['warnings'] = [x._to_dict() for x in self.warnings] - return _dict - - def __str__(self): - """Return a `str` version of this Profile object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not 
isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Trait(object): - """ - Trait. - - :attr str trait_id: The unique, non-localized identifier of the characteristic to - which the results pertain. IDs have the form - * `big5_{characteristic}` for Big Five personality dimensions - * `facet_{characteristic}` for Big Five personality facets - * `need_{characteristic}` for Needs - *`value_{characteristic}` for Values. - :attr str name: The user-visible, localized name of the characteristic. - :attr str category: The category of the characteristic: `personality` for Big Five - personality characteristics, `needs` for Needs, and `values` for Values. - :attr float percentile: The normalized percentile score for the characteristic. The - range is 0 to 1. For example, if the percentage for Openness is 0.60, the author - scored in the 60th percentile; the author is more open than 59 percent of the - population and less open than 39 percent of the population. - :attr float raw_score: (optional) The raw score for the characteristic. The range is 0 - to 1. A higher score generally indicates a greater likelihood that the author has that - characteristic, but raw scores must be considered in aggregate: The range of values in - practice might be much smaller than 0 to 1, so an individual score must be considered - in the context of the overall scores and their range. - The raw score is computed based on the input and the service model; it is not - normalized or compared with a sample population. The raw score enables comparison of - the results against a different sampling population and with a custom normalization - approach. - :attr bool significant: (optional) **`2017-10-13`**: Indicates whether the - characteristic is meaningful for the input language. The field is always `true` for - all characteristics of English, Spanish, and Japanese input. The field is `false` for - the subset of characteristics of Arabic and Korean input for which the service's - models are unable to generate meaningful results. **`2016-10-19`**: Not returned. - :attr list[Trait] children: (optional) For `personality` (Big Five) dimensions, more - detailed results for the facets of each dimension as inferred from the input text. - """ - - def __init__(self, - trait_id, - name, - category, - percentile, - raw_score=None, - significant=None, - children=None): - """ - Initialize a Trait object. - - :param str trait_id: The unique, non-localized identifier of the characteristic to - which the results pertain. IDs have the form - * `big5_{characteristic}` for Big Five personality dimensions - * `facet_{characteristic}` for Big Five personality facets - * `need_{characteristic}` for Needs - *`value_{characteristic}` for Values. - :param str name: The user-visible, localized name of the characteristic. - :param str category: The category of the characteristic: `personality` for Big - Five personality characteristics, `needs` for Needs, and `values` for Values. - :param float percentile: The normalized percentile score for the characteristic. - The range is 0 to 1. For example, if the percentage for Openness is 0.60, the - author scored in the 60th percentile; the author is more open than 59 percent of - the population and less open than 39 percent of the population. - :param float raw_score: (optional) The raw score for the characteristic. 
The range - is 0 to 1. A higher score generally indicates a greater likelihood that the author - has that characteristic, but raw scores must be considered in aggregate: The range - of values in practice might be much smaller than 0 to 1, so an individual score - must be considered in the context of the overall scores and their range. - The raw score is computed based on the input and the service model; it is not - normalized or compared with a sample population. The raw score enables comparison - of the results against a different sampling population and with a custom - normalization approach. - :param bool significant: (optional) **`2017-10-13`**: Indicates whether the - characteristic is meaningful for the input language. The field is always `true` - for all characteristics of English, Spanish, and Japanese input. The field is - `false` for the subset of characteristics of Arabic and Korean input for which the - service's models are unable to generate meaningful results. **`2016-10-19`**: Not - returned. - :param list[Trait] children: (optional) For `personality` (Big Five) dimensions, - more detailed results for the facets of each dimension as inferred from the input - text. - """ - self.trait_id = trait_id - self.name = name - self.category = category - self.percentile = percentile - self.raw_score = raw_score - self.significant = significant - self.children = children - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Trait object from a json dictionary.""" - args = {} - if 'trait_id' in _dict: - args['trait_id'] = _dict.get('trait_id') - else: - raise ValueError( - 'Required property \'trait_id\' not present in Trait JSON') - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in Trait JSON') - if 'category' in _dict: - args['category'] = _dict.get('category') - else: - raise ValueError( - 'Required property \'category\' not present in Trait JSON') - if 'percentile' in _dict: - args['percentile'] = _dict.get('percentile') - else: - raise ValueError( - 'Required property \'percentile\' not present in Trait JSON') - if 'raw_score' in _dict: - args['raw_score'] = _dict.get('raw_score') - if 'significant' in _dict: - args['significant'] = _dict.get('significant') - if 'children' in _dict: - args['children'] = [ - Trait._from_dict(x) for x in (_dict.get('children')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'trait_id') and self.trait_id is not None: - _dict['trait_id'] = self.trait_id - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'category') and self.category is not None: - _dict['category'] = self.category - if hasattr(self, 'percentile') and self.percentile is not None: - _dict['percentile'] = self.percentile - if hasattr(self, 'raw_score') and self.raw_score is not None: - _dict['raw_score'] = self.raw_score - if hasattr(self, 'significant') and self.significant is not None: - _dict['significant'] = self.significant - if hasattr(self, 'children') and self.children is not None: - _dict['children'] = [x._to_dict() for x in self.children] - return _dict - - def __str__(self): - """Return a `str` version of this Trait object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ 
== other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Warning(object): - """ - Warning. - - :attr str warning_id: The identifier of the warning message. - :attr str message: The message associated with the `warning_id`: - * `WORD_COUNT_MESSAGE`: "There were {number} words in the input. We need a minimum of - 600, preferably 1,200 or more, to compute statistically significant estimates." - * `JSON_AS_TEXT`: "Request input was processed as text/plain as indicated, however - detected a JSON input. Did you mean application/json?" - * `CONTENT_TRUNCATED`: "For maximum accuracy while also optimizing processing time, - only the first 250KB of input text (excluding markup) was analyzed. Accuracy levels - off at approximately 3,000 words so this did not affect the accuracy of the profile." - * `PARTIAL_TEXT_USED`, "The text provided to compute the profile was trimmed for - performance reasons. This action does not affect the accuracy of the output, as not - all of the input text was required." Applies only when Arabic input text exceeds a - threshold at which additional words do not contribute to the accuracy of the profile. - """ - - def __init__(self, warning_id, message): - """ - Initialize a Warning object. - - :param str warning_id: The identifier of the warning message. - :param str message: The message associated with the `warning_id`: - * `WORD_COUNT_MESSAGE`: "There were {number} words in the input. We need a minimum - of 600, preferably 1,200 or more, to compute statistically significant estimates." - * `JSON_AS_TEXT`: "Request input was processed as text/plain as indicated, however - detected a JSON input. Did you mean application/json?" - * `CONTENT_TRUNCATED`: "For maximum accuracy while also optimizing processing - time, only the first 250KB of input text (excluding markup) was analyzed. Accuracy - levels off at approximately 3,000 words so this did not affect the accuracy of the - profile." - * `PARTIAL_TEXT_USED`, "The text provided to compute the profile was trimmed for - performance reasons. This action does not affect the accuracy of the output, as - not all of the input text was required." Applies only when Arabic input text - exceeds a threshold at which additional words do not contribute to the accuracy of - the profile. 
- """ - self.warning_id = warning_id - self.message = message - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Warning object from a json dictionary.""" - args = {} - if 'warning_id' in _dict: - args['warning_id'] = _dict.get('warning_id') - else: - raise ValueError( - 'Required property \'warning_id\' not present in Warning JSON') - if 'message' in _dict: - args['message'] = _dict.get('message') - else: - raise ValueError( - 'Required property \'message\' not present in Warning JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'warning_id') and self.warning_id is not None: - _dict['warning_id'] = self.warning_id - if hasattr(self, 'message') and self.message is not None: - _dict['message'] = self.message - return _dict - - def __str__(self): - """Return a `str` version of this Warning object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other diff --git a/ibm_watson/speech_to_text_v1.py b/ibm_watson/speech_to_text_v1.py index dba709ca3..1b1b47bc6 100644 --- a/ibm_watson/speech_to_text_v1.py +++ b/ibm_watson/speech_to_text_v1.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2015, 2026. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,13 +13,26 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +# IBM OpenAPI SDK Code Generator Version: 3.105.0-3c13b041-20250605-193116 """ -The IBM® Speech to Text service provides APIs that use IBM's speech-recognition -capabilities to produce transcripts of spoken audio. The service can transcribe speech -from various languages and audio formats. In addition to basic transcription, the service -can produce detailed information about many different aspects of the audio. For most -languages, the service supports two sampling rates, broadband and narrowband. It returns -all JSON response content in the UTF-8 character set. +The IBM Watson™ Speech to Text service provides APIs that use IBM's +speech-recognition capabilities to produce transcripts of spoken audio. The service can +transcribe speech from various languages and audio formats. In addition to basic +transcription, the service can produce detailed information about many different aspects +of the audio. It returns all JSON response content in the UTF-8 character set. +The service supports two types of models: previous-generation models that include the +terms `Broadband` and `Narrowband` in their names, and next-generation models that include +the terms `Multimedia` and `Telephony` in their names. Broadband and multimedia models +have minimum sampling rates of 16 kHz. Narrowband and telephony models have minimum +sampling rates of 8 kHz. The next-generation models offer high throughput and greater +transcription accuracy. +Effective **31 July 2023**, all previous-generation models will be removed from the +service and the documentation. 
Most previous-generation models were deprecated on 15 March +2022. You must migrate to the equivalent large speech model or next-generation model by 31 +July 2023. For more information, see [Migrating to large speech +models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-migrate).{: +deprecated} For speech recognition, the service supports synchronous and asynchronous HTTP Representational State Transfer (REST) interfaces. It also supports a WebSocket interface that provides a full-duplex, low-latency communication channel: Clients send requests and @@ -29,16 +42,24 @@ customization to adapt a base model for the acoustic characteristics of your audio. For language model customization, the service also supports grammars. A grammar is a formal language specification that lets you restrict the phrases that the service can recognize. -Language model customization is generally available for production use with most supported -languages. Acoustic model customization is beta functionality that is available for all -supported languages. -""" +Language model customization and grammars are available for most previous- and +next-generation models. Acoustic model customization is available for all +previous-generation models. -from __future__ import absolute_import +API Version: 1.0.0 +See: https://cloud.ibm.com/docs/speech-to-text +""" +from enum import Enum +from typing import BinaryIO, Dict, List, Optional import json + +from ibm_cloud_sdk_core import BaseService, DetailedResponse +from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator +from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment +from ibm_cloud_sdk_core.utils import convert_list, convert_model + from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService ############################################################################## # Service @@ -48,148 +69,162 @@ class SpeechToTextV1(BaseService): """The Speech to Text V1 service.""" - default_url = 'https://stream.watsonplatform.net/speech-to-text/api' + DEFAULT_SERVICE_URL = 'https://api.us-south.speech-to-text.watson.cloud.ibm.com' + DEFAULT_SERVICE_NAME = 'speech_to_text' def __init__( - self, - url=default_url, - username=None, - password=None, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): + self, + authenticator: Authenticator = None, + service_name: str = DEFAULT_SERVICE_NAME, + ) -> None: """ Construct a new client for the Speech to Text service. - :param str url: The base url to use when contacting the service (e.g. - "https://stream.watsonplatform.net/speech-to-text/api/speech-to-text/api"). - The base url may differ between IBM Cloud regions. - - :param str username: The username used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str password: The password used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. 
- - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. Defaults to - 'https://iam.cloud.ibm.com/identity/token'. + :param Authenticator authenticator: The authenticator specifies the authentication mechanism. + Get up to date information from https://github.com/IBM/python-sdk-core/blob/main/README.md + about initializing the authenticator of your choice. """ - - BaseService.__init__( - self, - vcap_services_name='speech_to_text', - url=url, - username=username, - password=password, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Speech to Text') + if not authenticator: + authenticator = get_authenticator_from_environment(service_name) + BaseService.__init__(self, + service_url=self.DEFAULT_SERVICE_URL, + authenticator=authenticator) + self.configure_service(service_name) ######################### # Models ######################### - def get_model(self, model_id, **kwargs): + def list_models( + self, + **kwargs, + ) -> DetailedResponse: """ - Get a model. + List models. - Gets information for a single specified language model that is available for use - with the service. The information includes the name of the model and its minimum - sampling rate in Hertz, among other things. - **See also:** [Languages and - models](https://cloud.ibm.com/docs/services/speech-to-text/models.html). + Lists all language models that are available for use with the service. The + information includes the name of the model and its minimum sampling rate in Hertz, + among other things. The ordering of the list of models can change from call to + call; do not rely on an alphabetized or static list of models. + **See also:** [Listing all + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-list#models-list-all). - :param str model_id: The identifier of the model in the form of its name from the - output of the **Get a model** method. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `SpeechModels` object """ - if model_id is None: - raise ValueError('model_id must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_models', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'get_model') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/models' + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) - url = '/v1/models/{0}'.format(*self._encode_path_vars(model_id)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def list_models(self, **kwargs): + def get_model( + self, + model_id: str, + **kwargs, + ) -> DetailedResponse: """ - List models. + Get a model. - Lists all language models that are available for use with the service. 
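The hunk above replaces the old username/password and `iam_apikey` constructor arguments with an `Authenticator` and switches responses to `DetailedResponse`. A minimal usage sketch under the new style, assuming an IAM API key and the us-south endpoint (both placeholders):

```python
from ibm_watson import SpeechToTextV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Placeholders: substitute your own API key and service instance URL.
authenticator = IAMAuthenticator('your-apikey')
speech_to_text = SpeechToTextV1(authenticator=authenticator)
speech_to_text.set_service_url('https://api.us-south.speech-to-text.watson.cloud.ibm.com')

# Each method now returns a DetailedResponse; get_result() yields the parsed JSON body.
models = speech_to_text.list_models().get_result()
print([m['name'] for m in models['models']])

model = speech_to_text.get_model('en-US_Telephony').get_result()
print(model['name'], model['rate'])
```

The `IAMAuthenticator` import and `get_result()` accessor come from `ibm-cloud-sdk-core`, which the new imports at the top of this file rely on.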
The - information includes the name of the model and its minimum sampling rate in Hertz, - among other things. - **See also:** [Languages and - models](https://cloud.ibm.com/docs/services/speech-to-text/models.html). + Gets information for a single specified language model that is available for use + with the service. The information includes the name of the model and its minimum + sampling rate in Hertz, among other things. + **See also:** [Listing a specific + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-list#models-list-specific). + :param str model_id: The identifier of the model in the form of its name + from the output of the [List models](#listmodels) method. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `SpeechModel` object """ + if not model_id: + raise ValueError('model_id must be provided') headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_model', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'list_models') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['model_id'] + path_param_values = self.encode_path_vars(model_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/models/{model_id}'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) - url = '/v1/models' - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response ######################### # Synchronous ######################### - def recognize(self, - audio, - model=None, - language_customization_id=None, - acoustic_customization_id=None, - base_model_version=None, - customization_weight=None, - inactivity_timeout=None, - keywords=None, - keywords_threshold=None, - max_alternatives=None, - word_alternatives_threshold=None, - word_confidence=None, - timestamps=None, - profanity_filter=None, - smart_formatting=None, - speaker_labels=None, - customization_id=None, - grammar_name=None, - redaction=None, - content_type=None, - **kwargs): + def recognize( + self, + audio: BinaryIO, + *, + content_type: Optional[str] = None, + model: Optional[str] = None, + speech_begin_event: Optional[bool] = None, + enrichments: Optional[str] = None, + language_customization_id: Optional[str] = None, + acoustic_customization_id: Optional[str] = None, + base_model_version: Optional[str] = None, + customization_weight: Optional[float] = None, + inactivity_timeout: Optional[int] = None, + keywords: Optional[List[str]] = None, + keywords_threshold: Optional[float] = None, + max_alternatives: Optional[int] = None, + word_alternatives_threshold: Optional[float] = None, + word_confidence: Optional[bool] = None, + timestamps: Optional[bool] = None, + profanity_filter: Optional[bool] = None, + smart_formatting: Optional[bool] = None, + smart_formatting_version: Optional[int] = None, + speaker_labels: Optional[bool] = None, + grammar_name: Optional[str] = None, + redaction: Optional[bool] = None, + audio_metrics: Optional[bool] = None, + end_of_phrase_silence_time: Optional[float] = None, + 
split_transcript_at_phrase_end: Optional[bool] = None, + speech_detector_sensitivity: Optional[float] = None, + sad_module: Optional[int] = None, + background_audio_suppression: Optional[float] = None, + low_latency: Optional[bool] = None, + character_insertion_bias: Optional[float] = None, + **kwargs, + ) -> DetailedResponse: """ Recognize audio. @@ -198,9 +233,10 @@ def recognize(self, service automatically detects the endianness of the incoming audio and, for audio that includes multiple channels, downmixes the audio to one-channel mono during transcoding. The method returns only final results; to enable interim results, use - the WebSocket API. + the WebSocket API. (With the `curl` command, use the `--data-binary` option to + upload the file for the request.) **See also:** [Making a basic HTTP - request](https://cloud.ibm.com/docs/services/speech-to-text/http.html#HTTP-basic). + request](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-http#HTTP-basic). ### Streaming mode For requests to transcribe live audio as it becomes available, you must set the `Transfer-Encoding` header to `chunked` to use streaming mode. In streaming mode, @@ -211,9 +247,9 @@ def recognize(self, parameter to change the default of 30 seconds. **See also:** * [Audio - transmission](https://cloud.ibm.com/docs/services/speech-to-text/input.html#transmission) + transmission](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#transmission) * - [Timeouts](https://cloud.ibm.com/docs/services/speech-to-text/input.html#timeouts) + [Timeouts](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#timeouts) ### Audio formats (content types) The service accepts audio in the following formats (MIME types). * For formats that are labeled **Required**, you must use the `Content-Type` @@ -221,7 +257,7 @@ def recognize(self, * For all other formats, you can omit the `Content-Type` header or specify `application/octet-stream` with the header to have the service automatically detect the format of the audio. (With the `curl` command, you can specify either - `\"Content-Type:\"` or `\"Content-Type: application/octet-stream\"`.) + `"Content-Type:"` or `"Content-Type: application/octet-stream"`.) Where indicated, the format that you specify must include the sampling rate and can optionally include the number of channels and the endianness of the audio. * `audio/alaw` (**Required.** Specify the sampling rate (`rate`) of the audio.) @@ -246,172 +282,376 @@ def recognize(self, required rate, the service down-samples the audio to the appropriate rate. If the sampling rate of the audio is lower than the minimum required rate, the request fails. - **See also:** [Audio - formats](https://cloud.ibm.com/docs/services/speech-to-text/audio-formats.html). + **See also:** [Supported audio + formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats). + ### Large speech models and Next-generation models + The service supports large speech models and next-generation `Multimedia` (16 + kHz) and `Telephony` (8 kHz) models for many languages. Large speech models and + next-generation models have higher throughput than the service's previous + generation of `Broadband` and `Narrowband` models. When you use large speech + models and next-generation models, the service can return transcriptions more + quickly and also provide noticeably better transcription accuracy. 
+ You specify a large speech model or next-generation model by using the `model` + query parameter, as you do a previous-generation model. Only the next-generation + models support the `low_latency` parameter, and all large speech models and + next-generation models support the `character_insertion_bias` parameter. These + parameters are not available with previous-generation models. + Large speech models and next-generation models do not support all of the speech + recognition parameters that are available for use with previous-generation models. + Next-generation models do not support the following parameters: + * `acoustic_customization_id` + * `keywords` and `keywords_threshold` + * `processing_metrics` and `processing_metrics_interval` + * `word_alternatives_threshold` + **Important:** Effective **31 July 2023**, all previous-generation models will be + removed from the service and the documentation. Most previous-generation models + were deprecated on 15 March 2022. You must migrate to the equivalent large speech + model or next-generation model by 31 July 2023. For more information, see + [Migrating to large speech + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-migrate). + **See also:** + * [Large speech languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-large-speech-languages) + * [Supported features for large speech + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-large-speech-languages#models-lsm-supported-features) + * [Next-generation languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng) + * [Supported features for next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-features) ### Multipart speech recognition - **Note:** The Watson SDKs do not support multipart speech recognition. + **Note:** The asynchronous HTTP interface, WebSocket interface, and Watson SDKs + do not support multipart speech recognition. The HTTP `POST` method of the service also supports multipart speech recognition. With multipart requests, you pass all audio data as multipart form data. You specify some parameters as request headers and query parameters, but you pass JSON - metadata as form data to control most aspects of the transcription. - The multipart approach is intended for use with browsers for which JavaScript is - disabled or when the parameters used with the request are greater than the 8 KB - limit imposed by most HTTP servers and proxies. You can encounter this limit, for - example, if you want to spot a very large number of keywords. + metadata as form data to control most aspects of the transcription. You can use + multipart recognition to pass multiple audio files with a single request. + Use the multipart approach with browsers for which JavaScript is disabled or when + the parameters used with the request are greater than the 8 KB limit imposed by + most HTTP servers and proxies. You can encounter this limit, for example, if you + want to spot a very large number of keywords. **See also:** [Making a multipart HTTP - request](https://cloud.ibm.com/docs/services/speech-to-text/http.html#HTTP-multi). - - :param file audio: The audio to transcribe. - :param str model: The identifier of the model that is to be used for the - recognition request. See [Languages and - models](https://cloud.ibm.com/docs/services/speech-to-text/models.html). 
- :param str language_customization_id: The customization ID (GUID) of a custom - language model that is to be used with the recognition request. The base model of - the specified custom language model must match the model specified with the - `model` parameter. You must make the request with credentials for the instance of - the service that owns the custom model. By default, no custom language model is - used. See [Custom - models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input). - **Note:** Use this parameter instead of the deprecated `customization_id` - parameter. - :param str acoustic_customization_id: The customization ID (GUID) of a custom - acoustic model that is to be used with the recognition request. The base model of - the specified custom acoustic model must match the model specified with the - `model` parameter. You must make the request with credentials for the instance of - the service that owns the custom model. By default, no custom acoustic model is - used. See [Custom - models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input). - :param str base_model_version: The version of the specified base model that is to - be used with recognition request. Multiple versions of a base model can exist when - a model is updated for internal improvements. The parameter is intended primarily - for use with custom models that have been upgraded for a new base model. The - default value depends on whether the parameter is used with or without a custom - model. See [Base model - version](https://cloud.ibm.com/docs/services/speech-to-text/input.html#version). - :param float customization_weight: If you specify the customization ID (GUID) of a - custom language model with the recognition request, the customization weight tells - the service how much weight to give to words from the custom language model - compared to those from the base model for the current request. - Specify a value between 0.0 and 1.0. Unless a different customization weight was - specified for the custom model when it was trained, the default value is 0.3. A - customization weight that you specify overrides a weight that was specified when - the custom model was trained. - The default value yields the best performance in general. Assign a higher value if - your audio makes frequent use of OOV words from the custom model. Use caution when - setting the weight: a higher value can improve the accuracy of phrases from the - custom model's domain, but it can negatively affect performance on non-domain - phrases. - See [Custom - models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input). - :param int inactivity_timeout: The time in seconds after which, if only silence - (no speech) is detected in streaming audio, the connection is closed with a 400 - error. The parameter is useful for stopping audio submission from a live - microphone when a user simply walks away. Use `-1` for infinity. See [Inactivity - timeout](https://cloud.ibm.com/docs/services/speech-to-text/input.html#timeouts-inactivity). - :param list[str] keywords: An array of keyword strings to spot in the audio. Each - keyword string can include one or more string tokens. Keywords are spotted only in - the final results, not in interim hypotheses. If you specify any keywords, you - must also specify a keywords threshold. You can spot a maximum of 1000 keywords. - Omit the parameter or specify an empty array if you do not need to spot keywords. 
- See [Keyword - spotting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#keyword_spotting). - :param float keywords_threshold: A confidence value that is the lower bound for - spotting a keyword. A word is considered to match a keyword if its confidence is - greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. - If you specify a threshold, you must also specify one or more keywords. The - service performs no keyword spotting if you omit either parameter. See [Keyword - spotting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#keyword_spotting). - :param int max_alternatives: The maximum number of alternative transcripts that - the service is to return. By default, the service returns a single transcript. If - you specify a value of `0`, the service uses the default value, `1`. See [Maximum - alternatives](https://cloud.ibm.com/docs/services/speech-to-text/output.html#max_alternatives). - :param float word_alternatives_threshold: A confidence value that is the lower - bound for identifying a hypothesis as a possible word alternative (also known as - \"Confusion Networks\"). An alternative word is considered if its confidence is - greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. - By default, the service computes no alternative words. See [Word - alternatives](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_alternatives). - :param bool word_confidence: If `true`, the service returns a confidence measure - in the range of 0.0 to 1.0 for each word. By default, the service returns no word - confidence scores. See [Word - confidence](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_confidence). - :param bool timestamps: If `true`, the service returns time alignment for each - word. By default, no timestamps are returned. See [Word - timestamps](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_timestamps). - :param bool profanity_filter: If `true`, the service filters profanity from all - output except for keyword results by replacing inappropriate words with a series - of asterisks. Set the parameter to `false` to return results with no censoring. - Applies to US English transcription only. See [Profanity - filtering](https://cloud.ibm.com/docs/services/speech-to-text/output.html#profanity_filter). - :param bool smart_formatting: If `true`, the service converts dates, times, series - of digits and numbers, phone numbers, currency values, and internet addresses into - more readable, conventional representations in the final transcript of a - recognition request. For US English, the service also converts certain keyword - strings to punctuation symbols. By default, the service performs no smart - formatting. - **Note:** Applies to US English, Japanese, and Spanish transcription only. - See [Smart - formatting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#smart_formatting). - :param bool speaker_labels: If `true`, the response includes labels that identify - which words were spoken by which participants in a multi-person exchange. By - default, the service returns no speaker labels. Setting `speaker_labels` to `true` - forces the `timestamps` parameter to be `true`, regardless of whether you specify - `false` for the parameter. - **Note:** Applies to US English, Japanese, and Spanish transcription only. 
To - determine whether a language model supports speaker labels, you can also use the - **Get a model** method and check that the attribute `speaker_labels` is set to - `true`. - See [Speaker - labels](https://cloud.ibm.com/docs/services/speech-to-text/output.html#speaker_labels). - :param str customization_id: **Deprecated.** Use the `language_customization_id` - parameter to specify the customization ID (GUID) of a custom language model that - is to be used with the recognition request. Do not specify both parameters with a - request. - :param str grammar_name: The name of a grammar that is to be used with the - recognition request. If you specify a grammar, you must also use the - `language_customization_id` parameter to specify the name of the custom language - model for which the grammar is defined. The service recognizes only strings that - are recognized by the specified grammar; it does not recognize other custom words - from the model's words resource. See - [Grammars](https://cloud.ibm.com/docs/services/speech-to-text/input.html#grammars-input). - :param bool redaction: If `true`, the service redacts, or masks, numeric data from - final transcripts. The feature redacts any number that has three or more - consecutive digits by replacing each digit with an `X` character. It is intended - to redact sensitive numeric data, such as credit card numbers. By default, the - service performs no redaction. - When you enable redaction, the service automatically enables smart formatting, - regardless of whether you explicitly disable that feature. To ensure maximum - security, the service also disables keyword spotting (ignores the `keywords` and - `keywords_threshold` parameters) and returns only a single final transcript - (forces the `max_alternatives` parameter to be `1`). - **Note:** Applies to US English, Japanese, and Korean transcription only. - See [Numeric - redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction). - :param str content_type: The format (MIME type) of the audio. For more information - about specifying an audio format, see **Audio formats (content types)** in the - method description. + request](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-http#HTTP-multi). + + :param BinaryIO audio: The audio to transcribe. + :param str content_type: (optional) The format (MIME type) of the audio. + For more information about specifying an audio format, see **Audio formats + (content types)** in the method description. + :param str model: (optional) The model to use for speech recognition. If + you omit the `model` parameter, the service uses the US English + `en-US_BroadbandModel` by default. + _For IBM Cloud Pak for Data,_ if you do not install the + `en-US_BroadbandModel`, you must either specify a model with the request or + specify a new default model for your installation of the service. + **See also:** + * [Using a model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-use) + * [Using the default + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-use#models-use-default). + :param bool speech_begin_event: (optional) If `true`, the service returns a + response object `SpeechActivity` which contains the time when a speech + activity is detected in the stream. This can be used both in standard and + low latency mode. This feature enables client applications to know that + some words/speech has been detected and the service is in the process of + decoding. 
This can be used in lieu of interim results in standard mode. Use + `sad_module: 2` to increase accuracy and performance in detecting speech + boundaries within the audio stream. See [Using speech recognition + parameters](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-service-features#features-parameters). + :param str enrichments: (optional) Speech transcript enrichment improves + readability of raw ASR transcripts by adding punctuation (periods, commas, + question marks, exclamation points) and intelligent capitalization + (sentence beginnings, proper nouns, acronyms, brand names). To enable + enrichment, add the `enrichments=punctuation` parameter to your recognition + request. Supported languages include English (US, UK, Australia, India), + French (France, Canada), German, Italian, Portuguese (Brazil, Portugal), + Spanish (Spain, Latin America, Argentina, Chile, Colombia, Mexico, Peru), + and Japanese. See [Speech transcript + enrichment](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speech-transcript-enrichment). + :param str language_customization_id: (optional) The customization ID + (GUID) of a custom language model that is to be used with the recognition + request. The base model of the specified custom language model must match + the model specified with the `model` parameter. You must make the request + with credentials for the instance of the service that owns the custom + model. By default, no custom language model is used. See [Using a custom + language model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse). + **Note:** Use this parameter instead of the deprecated `customization_id` + parameter. + :param str acoustic_customization_id: (optional) The customization ID + (GUID) of a custom acoustic model that is to be used with the recognition + request. The base model of the specified custom acoustic model must match + the model specified with the `model` parameter. You must make the request + with credentials for the instance of the service that owns the custom + model. By default, no custom acoustic model is used. See [Using a custom + acoustic model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acousticUse). + :param str base_model_version: (optional) The version of the specified base + model that is to be used with the recognition request. Multiple versions of + a base model can exist when a model is updated for internal improvements. + The parameter is intended primarily for use with custom models that have + been upgraded for a new base model. The default value depends on whether + the parameter is used with or without a custom model. See [Making speech + recognition requests with upgraded custom + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade-use#custom-upgrade-use-recognition). + :param float customization_weight: (optional) If you specify the + customization ID (GUID) of a custom language model with the recognition + request, the customization weight tells the service how much weight to give + to words from the custom language model compared to those from the base + model for the current request. + Specify a value between 0.0 and 1.0. 
Unless a different customization + weight was specified for the custom model when the model was trained, the + default value is: + * 0.5 for large speech models + * 0.3 for previous-generation models + * 0.2 for most next-generation models + * 0.1 for next-generation English and Japanese models + A customization weight that you specify overrides a weight that was + specified when the custom model was trained. The default value yields the + best performance in general. Assign a higher value if your audio makes + frequent use of OOV words from the custom model. Use caution when setting + the weight: a higher value can improve the accuracy of phrases from the + custom model's domain, but it can negatively affect performance on + non-domain phrases. + See [Using customization + weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). + :param int inactivity_timeout: (optional) The time in seconds after which, + if only silence (no speech) is detected in streaming audio, the connection + is closed with a 400 error. The parameter is useful for stopping audio + submission from a live microphone when a user simply walks away. Use `-1` + for infinity. See [Inactivity + timeout](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#timeouts-inactivity). + :param List[str] keywords: (optional) An array of keyword strings to spot + in the audio. Each keyword string can include one or more string tokens. + Keywords are spotted only in the final results, not in interim hypotheses. + If you specify any keywords, you must also specify a keywords threshold. + Omit the parameter or specify an empty array if you do not need to spot + keywords. + You can spot a maximum of 1000 keywords with a single request. A single + keyword can have a maximum length of 1024 characters, though the maximum + effective length for double-byte languages might be shorter. Keywords are + case-insensitive. + See [Keyword + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). + :param float keywords_threshold: (optional) A confidence value that is the + lower bound for spotting a keyword. A word is considered to match a keyword + if its confidence is greater than or equal to the threshold. Specify a + probability between 0.0 and 1.0. If you specify a threshold, you must also + specify one or more keywords. The service performs no keyword spotting if + you omit either parameter. See [Keyword + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). + :param int max_alternatives: (optional) The maximum number of alternative + transcripts that the service is to return. By default, the service returns + a single transcript. If you specify a value of `0`, the service uses the + default value, `1`. See [Maximum + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#max-alternatives). + :param float word_alternatives_threshold: (optional) A confidence value + that is the lower bound for identifying a hypothesis as a possible word + alternative (also known as "Confusion Networks"). An alternative word is + considered if its confidence is greater than or equal to the threshold. + Specify a probability between 0.0 and 1.0. By default, the service computes + no alternative words. See [Word + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#word-alternatives). 
+ :param bool word_confidence: (optional) If `true`, the service returns a + confidence measure in the range of 0.0 to 1.0 for each word. By default, + the service returns no word confidence scores. See [Word + confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-confidence). + :param bool timestamps: (optional) If `true`, the service returns time + alignment for each word. By default, no timestamps are returned. See [Word + timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-timestamps). + :param bool profanity_filter: (optional) If `true`, the service filters + profanity from all output except for keyword results by replacing + inappropriate words with a series of asterisks. Set the parameter to + `false` to return results with no censoring. + **Note:** The parameter can be used with US English and Japanese + transcription only. See [Profanity + filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#profanity-filtering). + :param bool smart_formatting: (optional) If `true`, the service converts + dates, times, series of digits and numbers, phone numbers, currency values, + and internet addresses into more readable, conventional representations in + the final transcript of a recognition request. For US English, the service + also converts certain keyword strings to punctuation symbols. By default, + the service performs no smart formatting. + **Note:** The parameter can be used with US English, Japanese, and Spanish + (all dialects) transcription only. + See [Smart + formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). + :param int smart_formatting_version: (optional) Smart formatting version + for large speech models and next-generation models is supported in US + English, Brazilian Portuguese, French, German, Spanish and French Canadian + languages. + :param bool speaker_labels: (optional) If `true`, the response includes + labels that identify which words were spoken by which participants in a + multi-person exchange. By default, the service returns no speaker labels. + Setting `speaker_labels` to `true` forces the `timestamps` parameter to be + `true`, regardless of whether you specify `false` for the parameter. + * _For previous-generation models,_ the parameter can be used with + Australian English, US English, German, Japanese, Korean, and Spanish (both + broadband and narrowband models) and UK English (narrowband model) + transcription only. + * _For large speech models and next-generation models,_ the parameter can + be used with all available languages. + See [Speaker + labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). + :param str grammar_name: (optional) The name of a grammar that is to be + used with the recognition request. If you specify a grammar, you must also + use the `language_customization_id` parameter to specify the name of the + custom language model for which the grammar is defined. The service + recognizes only strings that are recognized by the specified grammar; it + does not recognize other custom words from the model's words resource. + See [Using a grammar for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). + :param bool redaction: (optional) If `true`, the service redacts, or masks, + numeric data from final transcripts. 
The feature redacts any number that + has three or more consecutive digits by replacing each digit with an `X` + character. It is intended to redact sensitive numeric data, such as credit + card numbers. By default, the service performs no redaction. + When you enable redaction, the service automatically enables smart + formatting, regardless of whether you explicitly disable that feature. To + ensure maximum security, the service also disables keyword spotting + (ignores the `keywords` and `keywords_threshold` parameters) and returns + only a single final transcript (forces the `max_alternatives` parameter to + be `1`). + **Note:** The parameter can be used with US English, Japanese, and Korean + transcription only. + See [Numeric + redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). + :param bool audio_metrics: (optional) If `true`, requests detailed + information about the signal characteristics of the input audio. The + service returns audio metrics with the final transcription results. By + default, the service returns no audio metrics. + See [Audio + metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio-metrics). + :param float end_of_phrase_silence_time: (optional) Specifies the duration + of the pause interval at which the service splits a transcript into + multiple final results. If the service detects pauses or extended silence + before it reaches the end of the audio stream, its response can include + multiple final results. Silence indicates a point at which the speaker + pauses between spoken words or phrases. + Specify a value for the pause interval in the range of 0.0 to 120.0. + * A value greater than 0 specifies the interval that the service is to use + for speech recognition. + * A value of 0 indicates that the service is to use the default interval. + It is equivalent to omitting the parameter. + The default pause interval for most languages is 0.8 seconds; the default + for Chinese is 0.6 seconds. + See [End of phrase silence + time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#silence-time). + :param bool split_transcript_at_phrase_end: (optional) If `true`, directs + the service to split the transcript into multiple final results based on + semantic features of the input, for example, at the conclusion of + meaningful phrases such as sentences. The service bases its understanding + of semantic features on the base language model that you use with a + request. Custom language models and grammars can also influence how and + where the service splits a transcript. + By default, the service splits transcripts based solely on the pause + interval. If the parameters are used together on the same request, + `end_of_phrase_silence_time` has precedence over + `split_transcript_at_phrase_end`. + See [Split transcript at phrase + end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#split-transcript). + :param float speech_detector_sensitivity: (optional) The sensitivity of + speech activity detection that the service is to perform. Use the parameter + to suppress word insertions from music, coughing, and other non-speech + events. The service biases the audio it passes for speech recognition by + evaluating the input audio against prior models of speech and non-speech + activity. + Specify a value between 0.0 and 1.0: + * 0.0 suppresses all audio (no speech is transcribed). 
+ * 0.5 (the default) provides a reasonable compromise for the level of + sensitivity. + * 1.0 suppresses no audio (speech detection sensitivity is disabled). + The values increase on a monotonic curve. Specifying one or two decimal + places of precision (for example, `0.55`) is typically more than + sufficient. + The parameter is supported with all large speech models, next-generation + models and with most previous-generation models. See [Speech detector + sensitivity](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-sensitivity) + and [Language model + support](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-support). + :param int sad_module: (optional) Detects speech boundaries within the + audio stream with better performance, improved noise suppression, faster + responsiveness, and increased accuracy. + Specify `sad_module: 2` + See [Speech Activity Detection + (SAD)](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#sad). + :param float background_audio_suppression: (optional) The level to which + the service is to suppress background audio based on its volume to prevent + it from being transcribed as speech. Use the parameter to suppress side + conversations or background noise. + Specify a value in the range of 0.0 to 1.0: + * 0.0 (the default) provides no suppression (background audio suppression + is disabled). + * 0.5 provides a reasonable level of audio suppression for general usage. + * 1.0 suppresses all audio (no audio is transcribed). + The values increase on a monotonic curve. Specifying one or two decimal + places of precision (for example, `0.55`) is typically more than + sufficient. + The parameter is supported with all large speech models, next-generation + models and with most previous-generation models. See [Background audio + suppression](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-suppression) + and [Language model + support](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-support). + :param bool low_latency: (optional) If `true` for next-generation + `Multimedia` and `Telephony` models that support low latency, directs the + service to produce results even more quickly than it usually does. + Next-generation models produce transcription results faster than + previous-generation models. The `low_latency` parameter causes the models + to produce results even more quickly, though the results might be less + accurate when the parameter is used. + The parameter is not available for large speech models and + previous-generation `Broadband` and `Narrowband` models. It is available + for most next-generation models. + * For a list of next-generation models that support low latency, see + [Supported next-generation language + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported). + * For more information about the `low_latency` parameter, see [Low + latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). + :param float character_insertion_bias: (optional) For large speech models + and next-generation models, an indication of whether the service is biased + to recognize shorter or longer strings of characters when developing + transcription hypotheses. By default, the service is optimized to produce + the best balance of strings of different lengths. + The default bias is 0.0. 
The allowable range of values is -1.0 to 1.0. + * Negative values bias the service to favor hypotheses with shorter strings + of characters. + * Positive values bias the service to favor hypotheses with longer strings + of characters. + As the value approaches -1.0 or 1.0, the impact of the parameter becomes + more pronounced. To determine the most effective value for your scenario, + start by setting the value of the parameter to a small increment, such as + -0.1, -0.05, 0.05, or 0.1, and assess how the value impacts the + transcription results. Then experiment with different values as necessary, + adjusting the value by small increments. + The parameter is not available for previous-generation models. + See [Character insertion + bias](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#insertion-bias). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `SpeechRecognitionResults` object """ if audio is None: raise ValueError('audio must be provided') - - headers = {'Content-Type': content_type} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'recognize') + headers = { + 'Content-Type': content_type, + } + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='recognize', + ) headers.update(sdk_headers) params = { 'model': model, + 'speech_begin_event': speech_begin_event, + 'enrichments': enrichments, 'language_customization_id': language_customization_id, 'acoustic_customization_id': acoustic_customization_id, 'base_model_version': base_model_version, 'customization_weight': customization_weight, 'inactivity_timeout': inactivity_timeout, - 'keywords': self._convert_list(keywords), + 'keywords': convert_list(keywords), 'keywords_threshold': keywords_threshold, 'max_alternatives': max_alternatives, 'word_alternatives_threshold': word_alternatives_threshold, @@ -419,124 +659,221 @@ def recognize(self, 'timestamps': timestamps, 'profanity_filter': profanity_filter, 'smart_formatting': smart_formatting, + 'smart_formatting_version': smart_formatting_version, 'speaker_labels': speaker_labels, - 'customization_id': customization_id, 'grammar_name': grammar_name, - 'redaction': redaction + 'redaction': redaction, + 'audio_metrics': audio_metrics, + 'end_of_phrase_silence_time': end_of_phrase_silence_time, + 'split_transcript_at_phrase_end': split_transcript_at_phrase_end, + 'speech_detector_sensitivity': speech_detector_sensitivity, + 'sad_module': sad_module, + 'background_audio_suppression': background_audio_suppression, + 'low_latency': low_latency, + 'character_insertion_bias': character_insertion_bias, } data = audio + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + url = '/v1/recognize' - response = self.request( + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, data=data, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response ######################### # Asynchronous ######################### - def check_job(self, id, **kwargs): + def register_callback( + self, + callback_url: str, + *, + user_secret: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ - Check a job. 
- - Returns information about the specified job. The response always includes the - status of the job and its creation and update times. If the status is `completed`, - the response includes the results of the recognition request. You must use - credentials for the instance of the service that owns a job to list information - about it. - You can use the method to retrieve the results of any job, regardless of whether - it was submitted with a callback URL and the `recognitions.completed_with_results` - event, and you can retrieve the results multiple times for as long as they remain - available. Use the **Check jobs** method to request information about the most - recent jobs associated with the calling credentials. - **See also:** [Checking the status and retrieving the results of a - job](https://cloud.ibm.com/docs/services/speech-to-text/async.html#job). + Register a callback. - :param str id: The identifier of the asynchronous job that is to be used for the - request. You must make the request with credentials for the instance of the - service that owns the job. + Registers a callback URL with the service for use with subsequent asynchronous + recognition requests. The service attempts to register, or allowlist, the callback + URL if it is not already registered by sending a `GET` request to the callback + URL. The service passes a random alphanumeric challenge string via the + `challenge_string` parameter of the request. The request includes an `Accept` + header that specifies `text/plain` as the required response type. + To be registered successfully, the callback URL must respond to the `GET` request + from the service. The response must send status code 200 and must include the + challenge string in its body. Set the `Content-Type` response header to + `text/plain`. Upon receiving this response, the service responds to the original + registration request with response code 201. + The service sends only a single `GET` request to the callback URL. If the service + does not receive a reply with a response code of 200 and a body that echoes the + challenge string sent by the service within five seconds, it does not allowlist + the URL; it instead sends status code 400 in response to the request to register a + callback. If the requested callback URL is already allowlisted, the service + responds to the initial registration request with response code 200. + If you specify a user secret with the request, the service uses it as a key to + calculate an HMAC-SHA1 signature of the challenge string in its response to the + `POST` request. It sends this signature in the `X-Callback-Signature` header of + its `GET` request to the URL during registration. It also uses the secret to + calculate a signature over the payload of every callback notification that uses + the URL. The signature provides authentication and data integrity for HTTP + communications. + After you successfully register a callback URL, you can use it with an indefinite + number of recognition requests. You can register a maximum of 20 callback URLS in + a one-hour span of time. + **See also:** [Registering a callback + URL](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-async#register). + + :param str callback_url: An HTTP or HTTPS URL to which callback + notifications are to be sent. To be allowlisted, the URL must successfully + echo the challenge string during URL verification. 
During verification, the + client can also check the signature that the service sends in the + `X-Callback-Signature` header to verify the origin of the request. + :param str user_secret: (optional) A user-specified string that the service + uses to generate the HMAC-SHA1 signature that it sends via the + `X-Callback-Signature` header. The service includes the header during URL + verification and with every notification sent to the callback URL. It + calculates the signature over the payload of the notification. If you omit + the parameter, the service does not send the header. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `RegisterStatus` object """ - if id is None: - raise ValueError('id must be provided') - + if not callback_url: + raise ValueError('callback_url must be provided') headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='register_callback', + ) + headers.update(sdk_headers) + + params = { + 'callback_url': callback_url, + 'user_secret': user_secret, + } + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'check_job') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/register_callback' + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + ) - url = '/v1/recognitions/{0}'.format(*self._encode_path_vars(id)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def check_jobs(self, **kwargs): + def unregister_callback( + self, + callback_url: str, + **kwargs, + ) -> DetailedResponse: """ - Check jobs. + Unregister a callback. - Returns the ID and status of the latest 100 outstanding jobs associated with the - credentials with which it is called. The method also returns the creation and - update times of each job, and, if a job was created with a callback URL and a user - token, the user token for the job. To obtain the results for a job whose status is - `completed` or not one of the latest 100 outstanding jobs, use the **Check a job** - method. A job and its results remain available until you delete them with the - **Delete a job** method or until the job's time to live expires, whichever comes - first. - **See also:** [Checking the status of the latest - jobs](https://cloud.ibm.com/docs/services/speech-to-text/async.html#jobs). + Unregisters a callback URL that was previously allowlisted with a [Register a + callback](#registercallback) request for use with the asynchronous interface. Once + unregistered, the URL can no longer be used with asynchronous recognition + requests. + **See also:** [Unregistering a callback + URL](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-async#unregister). + :param str callback_url: The callback URL that is to be unregistered. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ + if not callback_url: + raise ValueError('callback_url must be provided') headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='unregister_callback', + ) + headers.update(sdk_headers) + + params = { + 'callback_url': callback_url, + } + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'check_jobs') - headers.update(sdk_headers) + del kwargs['headers'] - url = '/v1/recognitions' - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + url = '/v1/unregister_callback' + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) return response - def create_job(self, - audio, - model=None, - callback_url=None, - events=None, - user_token=None, - results_ttl=None, - language_customization_id=None, - acoustic_customization_id=None, - base_model_version=None, - customization_weight=None, - inactivity_timeout=None, - keywords=None, - keywords_threshold=None, - max_alternatives=None, - word_alternatives_threshold=None, - word_confidence=None, - timestamps=None, - profanity_filter=None, - smart_formatting=None, - speaker_labels=None, - customization_id=None, - grammar_name=None, - redaction=None, - content_type=None, - **kwargs): + def create_job( + self, + audio: BinaryIO, + *, + content_type: Optional[str] = None, + model: Optional[str] = None, + callback_url: Optional[str] = None, + events: Optional[str] = None, + user_token: Optional[str] = None, + results_ttl: Optional[int] = None, + speech_begin_event: Optional[bool] = None, + enrichments: Optional[str] = None, + language_customization_id: Optional[str] = None, + acoustic_customization_id: Optional[str] = None, + base_model_version: Optional[str] = None, + customization_weight: Optional[float] = None, + inactivity_timeout: Optional[int] = None, + keywords: Optional[List[str]] = None, + keywords_threshold: Optional[float] = None, + max_alternatives: Optional[int] = None, + word_alternatives_threshold: Optional[float] = None, + word_confidence: Optional[bool] = None, + timestamps: Optional[bool] = None, + profanity_filter: Optional[bool] = None, + smart_formatting: Optional[bool] = None, + smart_formatting_version: Optional[int] = None, + speaker_labels: Optional[bool] = None, + grammar_name: Optional[str] = None, + redaction: Optional[bool] = None, + processing_metrics: Optional[bool] = None, + processing_metrics_interval: Optional[float] = None, + audio_metrics: Optional[bool] = None, + end_of_phrase_silence_time: Optional[float] = None, + split_transcript_at_phrase_end: Optional[bool] = None, + speech_detector_sensitivity: Optional[float] = None, + sad_module: Optional[int] = None, + background_audio_suppression: Optional[float] = None, + low_latency: Optional[bool] = None, + character_insertion_bias: Optional[float] = None, + **kwargs, + ) -> DetailedResponse: """ Create a job. @@ -550,16 +887,16 @@ def create_job(self, to subscribe to specific events and to specify a string that is to be included with each notification for the job. * By polling the service: Omit the `callback_url`, `events`, and `user_token` - parameters. You must then use the **Check jobs** or **Check a job** methods to - check the status of the job, using the latter to retrieve the results when the job - is complete. + parameters. 
You must then use the [Check jobs](#checkjobs) or [Check a + job](#checkjob) methods to check the status of the job, using the latter to + retrieve the results when the job is complete. The two approaches are not mutually exclusive. You can poll the service for job status or obtain results from the service manually even if you include a callback URL. In both cases, you can include the `results_ttl` parameter to specify how long the results are to remain available after the job is complete. Using the - HTTPS **Check a job** method to retrieve results is more secure than receiving - them via callback notification over HTTP because it provides confidentiality in - addition to authentication and data integrity. + HTTPS [Check a job](#checkjob) method to retrieve results is more secure than + receiving them via callback notification over HTTP because it provides + confidentiality in addition to authentication and data integrity. The method supports the same basic parameters as other HTTP and WebSocket recognition requests. It also supports the following parameters specific to the asynchronous interface: @@ -571,9 +908,10 @@ def create_job(self, The service automatically detects the endianness of the incoming audio and, for audio that includes multiple channels, downmixes the audio to one-channel mono during transcoding. The method returns only final results; to enable interim - results, use the WebSocket API. + results, use the WebSocket API. (With the `curl` command, use the `--data-binary` + option to upload the file for the request.) **See also:** [Creating a - job](https://cloud.ibm.com/docs/services/speech-to-text/async.html#create). + job](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-async#create). ### Streaming mode For requests to transcribe live audio as it becomes available, you must set the `Transfer-Encoding` header to `chunked` to use streaming mode. In streaming mode, @@ -584,9 +922,9 @@ def create_job(self, parameter to change the default of 30 seconds. **See also:** * [Audio - transmission](https://cloud.ibm.com/docs/services/speech-to-text/input.html#transmission) + transmission](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#transmission) * - [Timeouts](https://cloud.ibm.com/docs/services/speech-to-text/input.html#timeouts) + [Timeouts](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#timeouts) ### Audio formats (content types) The service accepts audio in the following formats (MIME types). * For formats that are labeled **Required**, you must use the `Content-Type` @@ -594,7 +932,7 @@ def create_job(self, * For all other formats, you can omit the `Content-Type` header or specify `application/octet-stream` with the header to have the service automatically detect the format of the audio. (With the `curl` command, you can specify either - `\"Content-Type:\"` or `\"Content-Type: application/octet-stream\"`.) + `"Content-Type:"` or `"Content-Type: application/octet-stream"`.) Where indicated, the format that you specify must include the sampling rate and can optionally include the number of channels and the endianness of the audio. * `audio/alaw` (**Required.** Specify the sampling rate (`rate`) of the audio.) @@ -619,182 +957,408 @@ def create_job(self, required rate, the service down-samples the audio to the appropriate rate. If the sampling rate of the audio is lower than the minimum required rate, the request fails. 
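As a hedged illustration of the Content-Type rules above (file names are placeholders and `speech_to_text` is the authenticated `SpeechToTextV1` client from the earlier sketch):

# audio/l16 is a "Required" format: the header must at least name the sampling rate.
with open('speech.raw', 'rb') as audio_file:
    job = speech_to_text.create_job(
        audio=audio_file,
        content_type='audio/l16;rate=16000',
    ).get_result()

# Self-describing formats can omit the header, or send application/octet-stream,
# and the service detects the format automatically.
with open('speech.mp3', 'rb') as audio_file:
    job = speech_to_text.create_job(audio=audio_file).get_result()

print(job['id'], job['status'])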
- **See also:** [Audio - formats](https://cloud.ibm.com/docs/services/speech-to-text/audio-formats.html). - - :param file audio: The audio to transcribe. - :param str model: The identifier of the model that is to be used for the - recognition request. See [Languages and - models](https://cloud.ibm.com/docs/services/speech-to-text/models.html). - :param str callback_url: A URL to which callback notifications are to be sent. The - URL must already be successfully white-listed by using the **Register a callback** - method. You can include the same callback URL with any number of job creation - requests. Omit the parameter to poll the service for job completion and results. - Use the `user_token` parameter to specify a unique user-specified string with each - job to differentiate the callback notifications for the jobs. - :param str events: If the job includes a callback URL, a comma-separated list of - notification events to which to subscribe. Valid events are - * `recognitions.started` generates a callback notification when the service begins - to process the job. - * `recognitions.completed` generates a callback notification when the job is - complete. You must use the **Check a job** method to retrieve the results before - they time out or are deleted. - * `recognitions.completed_with_results` generates a callback notification when the - job is complete. The notification includes the results of the request. - * `recognitions.failed` generates a callback notification if the service - experiences an error while processing the job. - The `recognitions.completed` and `recognitions.completed_with_results` events are - incompatible. You can specify only of the two events. - If the job includes a callback URL, omit the parameter to subscribe to the default - events: `recognitions.started`, `recognitions.completed`, and - `recognitions.failed`. If the job does not include a callback URL, omit the - parameter. - :param str user_token: If the job includes a callback URL, a user-specified string - that the service is to include with each callback notification for the job; the - token allows the user to maintain an internal mapping between jobs and - notification events. If the job does not include a callback URL, omit the - parameter. - :param int results_ttl: The number of minutes for which the results are to be - available after the job has finished. If not delivered via a callback, the results - must be retrieved within this time. Omit the parameter to use a time to live of - one week. The parameter is valid with or without a callback URL. - :param str language_customization_id: The customization ID (GUID) of a custom - language model that is to be used with the recognition request. The base model of - the specified custom language model must match the model specified with the - `model` parameter. You must make the request with credentials for the instance of - the service that owns the custom model. By default, no custom language model is - used. See [Custom - models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input). - **Note:** Use this parameter instead of the deprecated `customization_id` - parameter. - :param str acoustic_customization_id: The customization ID (GUID) of a custom - acoustic model that is to be used with the recognition request. The base model of - the specified custom acoustic model must match the model specified with the - `model` parameter. You must make the request with credentials for the instance of - the service that owns the custom model. 
By default, no custom acoustic model is - used. See [Custom - models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input). - :param str base_model_version: The version of the specified base model that is to - be used with recognition request. Multiple versions of a base model can exist when - a model is updated for internal improvements. The parameter is intended primarily - for use with custom models that have been upgraded for a new base model. The - default value depends on whether the parameter is used with or without a custom - model. See [Base model - version](https://cloud.ibm.com/docs/services/speech-to-text/input.html#version). - :param float customization_weight: If you specify the customization ID (GUID) of a - custom language model with the recognition request, the customization weight tells - the service how much weight to give to words from the custom language model - compared to those from the base model for the current request. - Specify a value between 0.0 and 1.0. Unless a different customization weight was - specified for the custom model when it was trained, the default value is 0.3. A - customization weight that you specify overrides a weight that was specified when - the custom model was trained. - The default value yields the best performance in general. Assign a higher value if - your audio makes frequent use of OOV words from the custom model. Use caution when - setting the weight: a higher value can improve the accuracy of phrases from the - custom model's domain, but it can negatively affect performance on non-domain - phrases. - See [Custom - models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input). - :param int inactivity_timeout: The time in seconds after which, if only silence - (no speech) is detected in streaming audio, the connection is closed with a 400 - error. The parameter is useful for stopping audio submission from a live - microphone when a user simply walks away. Use `-1` for infinity. See [Inactivity - timeout](https://cloud.ibm.com/docs/services/speech-to-text/input.html#timeouts-inactivity). - :param list[str] keywords: An array of keyword strings to spot in the audio. Each - keyword string can include one or more string tokens. Keywords are spotted only in - the final results, not in interim hypotheses. If you specify any keywords, you - must also specify a keywords threshold. You can spot a maximum of 1000 keywords. - Omit the parameter or specify an empty array if you do not need to spot keywords. - See [Keyword - spotting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#keyword_spotting). - :param float keywords_threshold: A confidence value that is the lower bound for - spotting a keyword. A word is considered to match a keyword if its confidence is - greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. - If you specify a threshold, you must also specify one or more keywords. The - service performs no keyword spotting if you omit either parameter. See [Keyword - spotting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#keyword_spotting). - :param int max_alternatives: The maximum number of alternative transcripts that - the service is to return. By default, the service returns a single transcript. If - you specify a value of `0`, the service uses the default value, `1`. See [Maximum - alternatives](https://cloud.ibm.com/docs/services/speech-to-text/output.html#max_alternatives). 
- :param float word_alternatives_threshold: A confidence value that is the lower - bound for identifying a hypothesis as a possible word alternative (also known as - \"Confusion Networks\"). An alternative word is considered if its confidence is - greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. - By default, the service computes no alternative words. See [Word - alternatives](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_alternatives). - :param bool word_confidence: If `true`, the service returns a confidence measure - in the range of 0.0 to 1.0 for each word. By default, the service returns no word - confidence scores. See [Word - confidence](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_confidence). - :param bool timestamps: If `true`, the service returns time alignment for each - word. By default, no timestamps are returned. See [Word - timestamps](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_timestamps). - :param bool profanity_filter: If `true`, the service filters profanity from all - output except for keyword results by replacing inappropriate words with a series - of asterisks. Set the parameter to `false` to return results with no censoring. - Applies to US English transcription only. See [Profanity - filtering](https://cloud.ibm.com/docs/services/speech-to-text/output.html#profanity_filter). - :param bool smart_formatting: If `true`, the service converts dates, times, series - of digits and numbers, phone numbers, currency values, and internet addresses into - more readable, conventional representations in the final transcript of a - recognition request. For US English, the service also converts certain keyword - strings to punctuation symbols. By default, the service performs no smart - formatting. - **Note:** Applies to US English, Japanese, and Spanish transcription only. - See [Smart - formatting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#smart_formatting). - :param bool speaker_labels: If `true`, the response includes labels that identify - which words were spoken by which participants in a multi-person exchange. By - default, the service returns no speaker labels. Setting `speaker_labels` to `true` - forces the `timestamps` parameter to be `true`, regardless of whether you specify - `false` for the parameter. - **Note:** Applies to US English, Japanese, and Spanish transcription only. To - determine whether a language model supports speaker labels, you can also use the - **Get a model** method and check that the attribute `speaker_labels` is set to - `true`. - See [Speaker - labels](https://cloud.ibm.com/docs/services/speech-to-text/output.html#speaker_labels). - :param str customization_id: **Deprecated.** Use the `language_customization_id` - parameter to specify the customization ID (GUID) of a custom language model that - is to be used with the recognition request. Do not specify both parameters with a - request. - :param str grammar_name: The name of a grammar that is to be used with the - recognition request. If you specify a grammar, you must also use the - `language_customization_id` parameter to specify the name of the custom language - model for which the grammar is defined. The service recognizes only strings that - are recognized by the specified grammar; it does not recognize other custom words - from the model's words resource. See - [Grammars](https://cloud.ibm.com/docs/services/speech-to-text/input.html#grammars-input). 
- :param bool redaction: If `true`, the service redacts, or masks, numeric data from - final transcripts. The feature redacts any number that has three or more - consecutive digits by replacing each digit with an `X` character. It is intended - to redact sensitive numeric data, such as credit card numbers. By default, the - service performs no redaction. - When you enable redaction, the service automatically enables smart formatting, - regardless of whether you explicitly disable that feature. To ensure maximum - security, the service also disables keyword spotting (ignores the `keywords` and - `keywords_threshold` parameters) and returns only a single final transcript - (forces the `max_alternatives` parameter to be `1`). - **Note:** Applies to US English, Japanese, and Korean transcription only. - See [Numeric - redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction). - :param str content_type: The format (MIME type) of the audio. For more information - about specifying an audio format, see **Audio formats (content types)** in the - method description. + **See also:** [Supported audio + formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats). + ### Large speech models and Next-generation models + The service supports large speech models and next-generation `Multimedia` (16 + kHz) and `Telephony` (8 kHz) models for many languages. Large speech models and + next-generation models have higher throughput than the service's previous + generation of `Broadband` and `Narrowband` models. When you use large speech + models and next-generation models, the service can return transcriptions more + quickly and also provide noticeably better transcription accuracy. + You specify a large speech model or next-generation model by using the `model` + query parameter, as you do a previous-generation model. Only the next-generation + models support the `low_latency` parameter, and all large speech models and + next-generation models support the `character_insertion_bias` parameter. These + parameters are not available with previous-generation models. + Large speech models and next-generation models do not support all of the speech + recognition parameters that are available for use with previous-generation models. + Next-generation models do not support the following parameters: + * `acoustic_customization_id` + * `keywords` and `keywords_threshold` + * `processing_metrics` and `processing_metrics_interval` + * `word_alternatives_threshold` + **Important:** Effective **31 July 2023**, all previous-generation models will be + removed from the service and the documentation. Most previous-generation models + were deprecated on 15 March 2022. You must migrate to the equivalent large speech + model or next-generation model by 31 July 2023. For more information, see + [Migrating to large speech + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-migrate). 
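A sketch of a job that targets a next-generation model together with the parameters the section above says it supports (the model name, file, and values are illustrative; `speech_to_text` is the client from the first sketch):

with open('call-recording.wav', 'rb') as audio_file:
    job = speech_to_text.create_job(
        audio=audio_file,
        content_type='audio/wav',
        model='en-US_Telephony',           # next-generation 8 kHz model
        low_latency=True,                  # supported only by some next-generation models
        character_insertion_bias=-0.1,     # nudge hypotheses toward shorter strings
        background_audio_suppression=0.5,  # moderate suppression of background audio
        # keywords, keywords_threshold, and word_alternatives_threshold are omitted:
        # the section above notes that next-generation models do not support them.
    ).get_result()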
+ **See also:** + * [Large speech languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-large-speech-languages) + * [Supported features for large speech + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-large-speech-languages#models-lsm-supported-features) + * [Next-generation languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng) + * [Supported features for next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-features). + + :param BinaryIO audio: The audio to transcribe. + :param str content_type: (optional) The format (MIME type) of the audio. + For more information about specifying an audio format, see **Audio formats + (content types)** in the method description. + :param str model: (optional) The model to use for speech recognition. If + you omit the `model` parameter, the service uses the US English + `en-US_BroadbandModel` by default. + _For IBM Cloud Pak for Data,_ if you do not install the + `en-US_BroadbandModel`, you must either specify a model with the request or + specify a new default model for your installation of the service. + **See also:** + * [Using a model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-use) + * [Using the default + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-use#models-use-default). + :param str callback_url: (optional) A URL to which callback notifications + are to be sent. The URL must already be successfully allowlisted by using + the [Register a callback](#registercallback) method. You can include the + same callback URL with any number of job creation requests. Omit the + parameter to poll the service for job completion and results. + Use the `user_token` parameter to specify a unique user-specified string + with each job to differentiate the callback notifications for the jobs. + :param str events: (optional) If the job includes a callback URL, a + comma-separated list of notification events to which to subscribe. Valid + events are + * `recognitions.started` generates a callback notification when the service + begins to process the job. + * `recognitions.completed` generates a callback notification when the job + is complete. You must use the [Check a job](#checkjob) method to retrieve + the results before they time out or are deleted. + * `recognitions.completed_with_results` generates a callback notification + when the job is complete. The notification includes the results of the + request. + * `recognitions.failed` generates a callback notification if the service + experiences an error while processing the job. + The `recognitions.completed` and `recognitions.completed_with_results` + events are incompatible. You can specify only one of the two events. + If the job includes a callback URL, omit the parameter to subscribe to the + default events: `recognitions.started`, `recognitions.completed`, and + `recognitions.failed`. If the job does not include a callback URL, omit the + parameter. + :param str user_token: (optional) If the job includes a callback URL, a + user-specified string that the service is to include with each callback + notification for the job; the token allows the user to maintain an internal + mapping between jobs and notification events. If the job does not include a + callback URL, omit the parameter.
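Putting the callback-related parameters together, a sketch that assumes the URL `https://example.com/job-results` can already echo the service's challenge string as described under [Register a callback](#registercallback) (URL, secret, token, and file are placeholders; `speech_to_text` is the client from the first sketch):

# One-time allowlisting of the callback URL; the optional user secret makes the
# service sign notifications with an HMAC-SHA1 X-Callback-Signature header.
speech_to_text.register_callback(
    callback_url='https://example.com/job-results',
    user_secret='my-callback-secret',
)

with open('meeting.flac', 'rb') as audio_file:
    job = speech_to_text.create_job(
        audio=audio_file,
        content_type='audio/flac',
        callback_url='https://example.com/job-results',
        events='recognitions.completed_with_results',  # results arrive in the notification itself
        user_token='meeting-2024-01-15',                # ties notifications back to this job
        results_ttl=60,                                 # keep results for 60 minutes
    ).get_result()

print(job['id'], job['status'])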
+ :param int results_ttl: (optional) The number of minutes for which the + results are to be available after the job has finished. If not delivered + via a callback, the results must be retrieved within this time. Omit the + parameter to use a time to live of one week. The parameter is valid with or + without a callback URL. + :param bool speech_begin_event: (optional) If `true`, the service returns a + response object `SpeechActivity` which contains the time when a speech + activity is detected in the stream. This can be used both in standard and + low latency mode. This feature enables client applications to know that + some words/speech has been detected and the service is in the process of + decoding. This can be used in lieu of interim results in standard mode. Use + `sad_module: 2` to increase accuracy and performance in detecting speech + boundaries within the audio stream. See [Using speech recognition + parameters](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-service-features#features-parameters). + :param str enrichments: (optional) Speech transcript enrichment improves + readability of raw ASR transcripts by adding punctuation (periods, commas, + question marks, exclamation points) and intelligent capitalization + (sentence beginnings, proper nouns, acronyms, brand names). To enable + enrichment, add the `enrichments=punctuation` parameter to your recognition + request. Supported languages include English (US, UK, Australia, India), + French (France, Canada), German, Italian, Portuguese (Brazil, Portugal), + Spanish (Spain, Latin America, Argentina, Chile, Colombia, Mexico, Peru), + and Japanese. See [Speech transcript + enrichment](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speech-transcript-enrichment). + :param str language_customization_id: (optional) The customization ID + (GUID) of a custom language model that is to be used with the recognition + request. The base model of the specified custom language model must match + the model specified with the `model` parameter. You must make the request + with credentials for the instance of the service that owns the custom + model. By default, no custom language model is used. See [Using a custom + language model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse). + **Note:** Use this parameter instead of the deprecated `customization_id` + parameter. + :param str acoustic_customization_id: (optional) The customization ID + (GUID) of a custom acoustic model that is to be used with the recognition + request. The base model of the specified custom acoustic model must match + the model specified with the `model` parameter. You must make the request + with credentials for the instance of the service that owns the custom + model. By default, no custom acoustic model is used. See [Using a custom + acoustic model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acousticUse). + :param str base_model_version: (optional) The version of the specified base + model that is to be used with the recognition request. Multiple versions of + a base model can exist when a model is updated for internal improvements. + The parameter is intended primarily for use with custom models that have + been upgraded for a new base model. The default value depends on whether + the parameter is used with or without a custom model. 
See [Making speech + recognition requests with upgraded custom + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade-use#custom-upgrade-use-recognition). + :param float customization_weight: (optional) If you specify the + customization ID (GUID) of a custom language model with the recognition + request, the customization weight tells the service how much weight to give + to words from the custom language model compared to those from the base + model for the current request. + Specify a value between 0.0 and 1.0. Unless a different customization + weight was specified for the custom model when the model was trained, the + default value is: + * 0.5 for large speech models + * 0.3 for previous-generation models + * 0.2 for most next-generation models + * 0.1 for next-generation English and Japanese models + A customization weight that you specify overrides a weight that was + specified when the custom model was trained. The default value yields the + best performance in general. Assign a higher value if your audio makes + frequent use of OOV words from the custom model. Use caution when setting + the weight: a higher value can improve the accuracy of phrases from the + custom model's domain, but it can negatively affect performance on + non-domain phrases. + See [Using customization + weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). + :param int inactivity_timeout: (optional) The time in seconds after which, + if only silence (no speech) is detected in streaming audio, the connection + is closed with a 400 error. The parameter is useful for stopping audio + submission from a live microphone when a user simply walks away. Use `-1` + for infinity. See [Inactivity + timeout](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#timeouts-inactivity). + :param List[str] keywords: (optional) An array of keyword strings to spot + in the audio. Each keyword string can include one or more string tokens. + Keywords are spotted only in the final results, not in interim hypotheses. + If you specify any keywords, you must also specify a keywords threshold. + Omit the parameter or specify an empty array if you do not need to spot + keywords. + You can spot a maximum of 1000 keywords with a single request. A single + keyword can have a maximum length of 1024 characters, though the maximum + effective length for double-byte languages might be shorter. Keywords are + case-insensitive. + See [Keyword + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). + :param float keywords_threshold: (optional) A confidence value that is the + lower bound for spotting a keyword. A word is considered to match a keyword + if its confidence is greater than or equal to the threshold. Specify a + probability between 0.0 and 1.0. If you specify a threshold, you must also + specify one or more keywords. The service performs no keyword spotting if + you omit either parameter. See [Keyword + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). + :param int max_alternatives: (optional) The maximum number of alternative + transcripts that the service is to return. By default, the service returns + a single transcript. If you specify a value of `0`, the service uses the + default value, `1`. See [Maximum + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#max-alternatives). 
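A sketch of the language-customization parameters above; the customization ID is a placeholder for a custom model whose base matches the `model` value, and 0.5 is simply a higher-than-default weight:

with open('warehouse-terms.wav', 'rb') as audio_file:
    job = speech_to_text.create_job(
        audio=audio_file,
        content_type='audio/wav',
        model='en-US_BroadbandModel',
        language_customization_id='{customization_id}',  # placeholder GUID
        customization_weight=0.5,  # favor in-domain terms above the previous-generation default of 0.3
        inactivity_timeout=-1,     # never close the connection on silence
    ).get_result()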
+ :param float word_alternatives_threshold: (optional) A confidence value + that is the lower bound for identifying a hypothesis as a possible word + alternative (also known as "Confusion Networks"). An alternative word is + considered if its confidence is greater than or equal to the threshold. + Specify a probability between 0.0 and 1.0. By default, the service computes + no alternative words. See [Word + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#word-alternatives). + :param bool word_confidence: (optional) If `true`, the service returns a + confidence measure in the range of 0.0 to 1.0 for each word. By default, + the service returns no word confidence scores. See [Word + confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-confidence). + :param bool timestamps: (optional) If `true`, the service returns time + alignment for each word. By default, no timestamps are returned. See [Word + timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-timestamps). + :param bool profanity_filter: (optional) If `true`, the service filters + profanity from all output except for keyword results by replacing + inappropriate words with a series of asterisks. Set the parameter to + `false` to return results with no censoring. + **Note:** The parameter can be used with US English and Japanese + transcription only. See [Profanity + filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#profanity-filtering). + :param bool smart_formatting: (optional) If `true`, the service converts + dates, times, series of digits and numbers, phone numbers, currency values, + and internet addresses into more readable, conventional representations in + the final transcript of a recognition request. For US English, the service + also converts certain keyword strings to punctuation symbols. By default, + the service performs no smart formatting. + **Note:** The parameter can be used with US English, Japanese, and Spanish + (all dialects) transcription only. + See [Smart + formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). + :param int smart_formatting_version: (optional) Smart formatting version + for large speech models and next-generation models is supported in US + English, Brazilian Portuguese, French, German, Spanish and French Canadian + languages. + :param bool speaker_labels: (optional) If `true`, the response includes + labels that identify which words were spoken by which participants in a + multi-person exchange. By default, the service returns no speaker labels. + Setting `speaker_labels` to `true` forces the `timestamps` parameter to be + `true`, regardless of whether you specify `false` for the parameter. + * _For previous-generation models,_ the parameter can be used with + Australian English, US English, German, Japanese, Korean, and Spanish (both + broadband and narrowband models) and UK English (narrowband model) + transcription only. + * _For large speech models and next-generation models,_ the parameter can + be used with all available languages. + See [Speaker + labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). + :param str grammar_name: (optional) The name of a grammar that is to be + used with the recognition request. 
If you specify a grammar, you must also + use the `language_customization_id` parameter to specify the name of the + custom language model for which the grammar is defined. The service + recognizes only strings that are recognized by the specified grammar; it + does not recognize other custom words from the model's words resource. + See [Using a grammar for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). + :param bool redaction: (optional) If `true`, the service redacts, or masks, + numeric data from final transcripts. The feature redacts any number that + has three or more consecutive digits by replacing each digit with an `X` + character. It is intended to redact sensitive numeric data, such as credit + card numbers. By default, the service performs no redaction. + When you enable redaction, the service automatically enables smart + formatting, regardless of whether you explicitly disable that feature. To + ensure maximum security, the service also disables keyword spotting + (ignores the `keywords` and `keywords_threshold` parameters) and returns + only a single final transcript (forces the `max_alternatives` parameter to + be `1`). + **Note:** The parameter can be used with US English, Japanese, and Korean + transcription only. + See [Numeric + redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). + :param bool processing_metrics: (optional) If `true`, requests processing + metrics about the service's transcription of the input audio. The service + returns processing metrics at the interval specified by the + `processing_metrics_interval` parameter. It also returns processing metrics + for transcription events, for example, for final and interim results. By + default, the service returns no processing metrics. + See [Processing + metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing-metrics). + :param float processing_metrics_interval: (optional) Specifies the interval + in real wall-clock seconds at which the service is to return processing + metrics. The parameter is ignored unless the `processing_metrics` parameter + is set to `true`. + The parameter accepts a minimum value of 0.1 seconds. The level of + precision is not restricted, so you can specify values such as 0.25 and + 0.125. + The service does not impose a maximum value. If you want to receive + processing metrics only for transcription events instead of at periodic + intervals, set the value to a large number. If the value is larger than the + duration of the audio, the service returns processing metrics only for + transcription events. + See [Processing + metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing-metrics). + :param bool audio_metrics: (optional) If `true`, requests detailed + information about the signal characteristics of the input audio. The + service returns audio metrics with the final transcription results. By + default, the service returns no audio metrics. + See [Audio + metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio-metrics). + :param float end_of_phrase_silence_time: (optional) Specifies the duration + of the pause interval at which the service splits a transcript into + multiple final results. If the service detects pauses or extended silence + before it reaches the end of the audio stream, its response can include + multiple final results. 
Silence indicates a point at which the speaker + pauses between spoken words or phrases. + Specify a value for the pause interval in the range of 0.0 to 120.0. + * A value greater than 0 specifies the interval that the service is to use + for speech recognition. + * A value of 0 indicates that the service is to use the default interval. + It is equivalent to omitting the parameter. + The default pause interval for most languages is 0.8 seconds; the default + for Chinese is 0.6 seconds. + See [End of phrase silence + time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#silence-time). + :param bool split_transcript_at_phrase_end: (optional) If `true`, directs + the service to split the transcript into multiple final results based on + semantic features of the input, for example, at the conclusion of + meaningful phrases such as sentences. The service bases its understanding + of semantic features on the base language model that you use with a + request. Custom language models and grammars can also influence how and + where the service splits a transcript. + By default, the service splits transcripts based solely on the pause + interval. If the parameters are used together on the same request, + `end_of_phrase_silence_time` has precedence over + `split_transcript_at_phrase_end`. + See [Split transcript at phrase + end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#split-transcript). + :param float speech_detector_sensitivity: (optional) The sensitivity of + speech activity detection that the service is to perform. Use the parameter + to suppress word insertions from music, coughing, and other non-speech + events. The service biases the audio it passes for speech recognition by + evaluating the input audio against prior models of speech and non-speech + activity. + Specify a value between 0.0 and 1.0: + * 0.0 suppresses all audio (no speech is transcribed). + * 0.5 (the default) provides a reasonable compromise for the level of + sensitivity. + * 1.0 suppresses no audio (speech detection sensitivity is disabled). + The values increase on a monotonic curve. Specifying one or two decimal + places of precision (for example, `0.55`) is typically more than + sufficient. + The parameter is supported with all large speech models, next-generation + models and with most previous-generation models. See [Speech detector + sensitivity](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-sensitivity) + and [Language model + support](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-support). + :param int sad_module: (optional) Detects speech boundaries within the + audio stream with better performance, improved noise suppression, faster + responsiveness, and increased accuracy. + Specify `sad_module: 2` + See [Speech Activity Detection + (SAD)](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#sad). + :param float background_audio_suppression: (optional) The level to which + the service is to suppress background audio based on its volume to prevent + it from being transcribed as speech. Use the parameter to suppress side + conversations or background noise. + Specify a value in the range of 0.0 to 1.0: + * 0.0 (the default) provides no suppression (background audio suppression + is disabled). + * 0.5 provides a reasonable level of audio suppression for general usage. + * 1.0 suppresses all audio (no audio is transcribed). 
+ The values increase on a monotonic curve. Specifying one or two decimal + places of precision (for example, `0.55`) is typically more than + sufficient. + The parameter is supported with all large speech models, next-generation + models and with most previous-generation models. See [Background audio + suppression](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-suppression) + and [Language model + support](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-support). + :param bool low_latency: (optional) If `true` for next-generation + `Multimedia` and `Telephony` models that support low latency, directs the + service to produce results even more quickly than it usually does. + Next-generation models produce transcription results faster than + previous-generation models. The `low_latency` parameter causes the models + to produce results even more quickly, though the results might be less + accurate when the parameter is used. + The parameter is not available for large speech models and + previous-generation `Broadband` and `Narrowband` models. It is available + for most next-generation models. + * For a list of next-generation models that support low latency, see + [Supported next-generation language + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported). + * For more information about the `low_latency` parameter, see [Low + latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). + :param float character_insertion_bias: (optional) For large speech models + and next-generation models, an indication of whether the service is biased + to recognize shorter or longer strings of characters when developing + transcription hypotheses. By default, the service is optimized to produce + the best balance of strings of different lengths. + The default bias is 0.0. The allowable range of values is -1.0 to 1.0. + * Negative values bias the service to favor hypotheses with shorter strings + of characters. + * Positive values bias the service to favor hypotheses with longer strings + of characters. + As the value approaches -1.0 or 1.0, the impact of the parameter becomes + more pronounced. To determine the most effective value for your scenario, + start by setting the value of the parameter to a small increment, such as + -0.1, -0.05, 0.05, or 0.1, and assess how the value impacts the + transcription results. Then experiment with different values as necessary, + adjusting the value by small increments. + The parameter is not available for previous-generation models. + See [Character insertion + bias](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#insertion-bias). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
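# A minimal sketch of an asynchronous recognition request that exercises several of
# the parameters documented above. The API key, service URL, model name, audio file,
# and parameter values below are illustrative placeholders, not recommendations.
from ibm_watson import SpeechToTextV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

speech_to_text = SpeechToTextV1(authenticator=IAMAuthenticator('{apikey}'))
speech_to_text.set_service_url('{url}')

with open('audio-file.flac', 'rb') as audio_file:
    job = speech_to_text.create_job(
        audio=audio_file,
        content_type='audio/flac',
        model='en-US_Telephony',
        end_of_phrase_silence_time=0.4,      # split results after 0.4 seconds of silence
        split_transcript_at_phrase_end=True,
        speech_detector_sensitivity=0.6,     # slightly more sensitive than the 0.5 default
        background_audio_suppression=0.5,
        low_latency=True,                    # supported by most next-generation models only
    ).get_result()
print(job['id'], job['status'])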
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `RecognitionJob` object """ if audio is None: raise ValueError('audio must be provided') - - headers = {'Content-Type': content_type} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'create_job') + headers = { + 'Content-Type': content_type, + } + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_job', + ) headers.update(sdk_headers) params = { @@ -803,12 +1367,14 @@ def create_job(self, 'events': events, 'user_token': user_token, 'results_ttl': results_ttl, + 'speech_begin_event': speech_begin_event, + 'enrichments': enrichments, 'language_customization_id': language_customization_id, 'acoustic_customization_id': acoustic_customization_id, 'base_model_version': base_model_version, 'customization_weight': customization_weight, 'inactivity_timeout': inactivity_timeout, - 'keywords': self._convert_list(keywords), + 'keywords': convert_list(keywords), 'keywords_threshold': keywords_threshold, 'max_alternatives': max_alternatives, 'word_alternatives_threshold': word_alternatives_threshold, @@ -816,175 +1382,208 @@ def create_job(self, 'timestamps': timestamps, 'profanity_filter': profanity_filter, 'smart_formatting': smart_formatting, + 'smart_formatting_version': smart_formatting_version, 'speaker_labels': speaker_labels, - 'customization_id': customization_id, 'grammar_name': grammar_name, - 'redaction': redaction + 'redaction': redaction, + 'processing_metrics': processing_metrics, + 'processing_metrics_interval': processing_metrics_interval, + 'audio_metrics': audio_metrics, + 'end_of_phrase_silence_time': end_of_phrase_silence_time, + 'split_transcript_at_phrase_end': split_transcript_at_phrase_end, + 'speech_detector_sensitivity': speech_detector_sensitivity, + 'sad_module': sad_module, + 'background_audio_suppression': background_audio_suppression, + 'low_latency': low_latency, + 'character_insertion_bias': character_insertion_bias, } data = audio + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + url = '/v1/recognitions' - response = self.request( + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, data=data, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def delete_job(self, id, **kwargs): + def check_jobs( + self, + **kwargs, + ) -> DetailedResponse: """ - Delete a job. + Check jobs. - Deletes the specified job. You cannot delete a job that the service is actively - processing. Once you delete a job, its results are no longer available. The - service automatically deletes a job and its results when the time to live for the - results expires. You must use credentials for the instance of the service that - owns a job to delete it. - **See also:** [Deleting a - job](https://cloud.ibm.com/docs/services/speech-to-text/async.html#delete-async). + Returns the ID and status of the latest 100 outstanding jobs associated with the + credentials with which it is called. The method also returns the creation and + update times of each job, and, if a job was created with a callback URL and a user + token, the user token for the job. To obtain the results for a job whose status is + `completed` or not one of the latest 100 outstanding jobs, use the [Check a + job[(#checkjob) method. 
A job and its results remain available until you delete + them with the [Delete a job](#deletejob) method or until the job's time to live + expires, whichever comes first. + **See also:** [Checking the status of the latest + jobs](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-async#jobs). - :param str id: The identifier of the asynchronous job that is to be used for the - request. You must make the request with credentials for the instance of the - service that owns the job. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `RecognitionJobs` object """ - if id is None: - raise ValueError('id must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='check_jobs', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'delete_job') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/recognitions' + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) - url = '/v1/recognitions/{0}'.format(*self._encode_path_vars(id)) - response = self.request( - method='DELETE', url=url, headers=headers, accept_json=False) + response = self.send(request, **kwargs) return response - def register_callback(self, callback_url, user_secret=None, **kwargs): + def check_job( + self, + id: str, + **kwargs, + ) -> DetailedResponse: """ - Register a callback. + Check a job. - Registers a callback URL with the service for use with subsequent asynchronous - recognition requests. The service attempts to register, or white-list, the - callback URL if it is not already registered by sending a `GET` request to the - callback URL. The service passes a random alphanumeric challenge string via the - `challenge_string` parameter of the request. The request includes an `Accept` - header that specifies `text/plain` as the required response type. - To be registered successfully, the callback URL must respond to the `GET` request - from the service. The response must send status code 200 and must include the - challenge string in its body. Set the `Content-Type` response header to - `text/plain`. Upon receiving this response, the service responds to the original - registration request with response code 201. - The service sends only a single `GET` request to the callback URL. If the service - does not receive a reply with a response code of 200 and a body that echoes the - challenge string sent by the service within five seconds, it does not white-list - the URL; it instead sends status code 400 in response to the **Register a - callback** request. If the requested callback URL is already white-listed, the - service responds to the initial registration request with response code 200. - If you specify a user secret with the request, the service uses it as a key to - calculate an HMAC-SHA1 signature of the challenge string in its response to the - `POST` request. It sends this signature in the `X-Callback-Signature` header of - its `GET` request to the URL during registration. It also uses the secret to - calculate a signature over the payload of every callback notification that uses - the URL. 
The signature provides authentication and data integrity for HTTP - communications. - After you successfully register a callback URL, you can use it with an indefinite - number of recognition requests. You can register a maximum of 20 callback URLS in - a one-hour span of time. - **See also:** [Registering a callback - URL](https://cloud.ibm.com/docs/services/speech-to-text/async.html#register). - - :param str callback_url: An HTTP or HTTPS URL to which callback notifications are - to be sent. To be white-listed, the URL must successfully echo the challenge - string during URL verification. During verification, the client can also check the - signature that the service sends in the `X-Callback-Signature` header to verify - the origin of the request. - :param str user_secret: A user-specified string that the service uses to generate - the HMAC-SHA1 signature that it sends via the `X-Callback-Signature` header. The - service includes the header during URL verification and with every notification - sent to the callback URL. It calculates the signature over the payload of the - notification. If you omit the parameter, the service does not send the header. + Returns information about the specified job. The response always includes the + status of the job and its creation and update times. If the status is `completed`, + the response includes the results of the recognition request. You must use + credentials for the instance of the service that owns a job to list information + about it. + You can use the method to retrieve the results of any job, regardless of whether + it was submitted with a callback URL and the `recognitions.completed_with_results` + event, and you can retrieve the results multiple times for as long as they remain + available. Use the [Check jobs](#checkjobs) method to request information about + the most recent jobs associated with the calling credentials. + **See also:** [Checking the status and retrieving the results of a + job](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-async#job). + + :param str id: The identifier of the asynchronous job that is to be used + for the request. You must make the request with credentials for the + instance of the service that owns the job. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `RecognitionJob` object """ - if callback_url is None: - raise ValueError('callback_url must be provided') - + if not id: + raise ValueError('id must be provided') headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'register_callback') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='check_job', + ) headers.update(sdk_headers) - params = {'callback_url': callback_url, 'user_secret': user_secret} - - url = '/v1/register_callback' - response = self.request( - method='POST', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['id'] + path_param_values = self.encode_path_vars(id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/recognitions/{id}'.format(**path_param_dict) + request = self.prepare_request( + method='GET', url=url, headers=headers, - params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def unregister_callback(self, callback_url, **kwargs): + def delete_job( + self, + id: str, + **kwargs, + ) -> DetailedResponse: """ - Unregister a callback. + Delete a job. - Unregisters a callback URL that was previously white-listed with a **Register a - callback** request for use with the asynchronous interface. Once unregistered, the - URL can no longer be used with asynchronous recognition requests. - **See also:** [Unregistering a callback - URL](https://cloud.ibm.com/docs/services/speech-to-text/async.html#unregister). + Deletes the specified job. You cannot delete a job that the service is actively + processing. Once you delete a job, its results are no longer available. The + service automatically deletes a job and its results when the time to live for the + results expires. You must use credentials for the instance of the service that + owns a job to delete it. + **See also:** [Deleting a + job](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-async#delete-async). - :param str callback_url: The callback URL that is to be unregistered. + :param str id: The identifier of the asynchronous job that is to be used + for the request. You must make the request with credentials for the + instance of the service that owns the job. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
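# A sketch of polling an asynchronous job until it finishes and then deleting it,
# using the 10-second polling interval suggested above. `speech_to_text` and `job`
# are assumed to come from the create_job sketch earlier in this file.
import time

job_id = job['id']
while True:
    job = speech_to_text.check_job(job_id).get_result()
    if job['status'] in ('completed', 'failed'):
        break
    time.sleep(10)                           # poll every 10 seconds

if job['status'] == 'completed':
    for chunk in job.get('results', []):
        for result in chunk.get('results', []):
            print(result['alternatives'][0]['transcript'])

# Results remain available until the job's time to live expires; delete the job
# explicitly once the results are no longer needed.
speech_to_text.delete_job(job_id)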
:rtype: DetailedResponse """ - if callback_url is None: - raise ValueError('callback_url must be provided') - + if not id: + raise ValueError('id must be provided') headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'unregister_callback') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_job', + ) headers.update(sdk_headers) - params = {'callback_url': callback_url} + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] - url = '/v1/unregister_callback' - response = self.request( - method='POST', + path_param_keys = ['id'] + path_param_values = self.encode_path_vars(id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/recognitions/{id}'.format(**path_param_dict) + request = self.prepare_request( + method='DELETE', url=url, headers=headers, - params=params, - accept_json=False) + ) + + response = self.send(request, **kwargs) return response ######################### # Custom language models ######################### - def create_language_model(self, - name, - base_model_name, - dialect=None, - description=None, - **kwargs): + def create_language_model( + self, + name: str, + base_model_name: str, + *, + dialect: Optional[str] = None, + description: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ Create a custom language model. @@ -992,215 +1591,314 @@ def create_language_model(self, language model can be used only with the base model for which it is created. The model is owned by the instance of the service whose credentials are used to create it. - **See also:** [Create a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-create.html#createModel-language). - - :param str name: A user-defined name for the new custom language model. Use a name - that is unique among all custom language models that you own. Use a localized name - that matches the language of the custom model. Use a name that describes the - domain of the custom model, such as `Medical custom model` or `Legal custom - model`. - :param str base_model_name: The name of the base language model that is to be - customized by the new custom language model. The new custom model can be used only - with the base model that it customizes. - To determine whether a base model supports language model customization, use the - **Get a model** method and check that the attribute `custom_language_model` is set - to `true`. You can also refer to [Language support for - customization](https://cloud.ibm.com/docs/services/speech-to-text/custom.html#languageSupport). - :param str dialect: The dialect of the specified language that is to be used with - the custom language model. The parameter is meaningful only for Spanish models, - for which the service creates a custom language model that is suited for speech in - one of the following dialects: - * `es-ES` for Castilian Spanish (the default) - * `es-LA` for Latin American Spanish - * `es-US` for North American (Mexican) Spanish - A specified dialect must be valid for the base model. By default, the dialect - matches the language of the base model; for example, `en-US` for either of the US - English language models. - :param str description: A description of the new custom language model. Use a - localized description that matches the language of the custom model. 
+ You can create a maximum of 1024 custom language models per owning credentials. + The service returns an error if you attempt to create more than 1024 models. You + do not lose any models, but you cannot create any more until your model count is + below the limit. + **Important:** Effective **31 July 2023**, all previous-generation models will be + removed from the service and the documentation. Most previous-generation models + were deprecated on 15 March 2022. You must migrate to the equivalent large speech + model or next-generation model by 31 July 2023. For more information, see + [Migrating to large speech + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-migrate). + **See also:** + * [Create a custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageCreate#createModel-language) + * [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support) + ### Large speech models and Next-generation models + The service supports large speech models and next-generation `Multimedia` (16 + kHz) and `Telephony` (8 kHz) models for many languages. Large speech models and + next-generation models have higher throughput than the service's previous + generation of `Broadband` and `Narrowband` models. When you use large speech + models and next-generation models, the service can return transcriptions more + quickly and also provide noticeably better transcription accuracy. + You specify a large speech model or next-generation model by using the `model` + query parameter, as you do a previous-generation model. Only the next-generation + models support the `low_latency` parameter, and all large speech models and + next-generation models support the `character_insertion_bias` parameter. These + parameters are not available with previous-generation models. + Large speech models and next-generation models do not support all of the speech + recognition parameters that are available for use with previous-generation models. + Next-generation models do not support the following parameters: + * `acoustic_customization_id` + * `keywords` and `keywords_threshold` + * `processing_metrics` and `processing_metrics_interval` + * `word_alternatives_threshold` + **Important:** Effective **31 July 2023**, all previous-generation models will be + removed from the service and the documentation. Most previous-generation models + were deprecated on 15 March 2022. You must migrate to the equivalent large speech + model or next-generation model by 31 July 2023. For more information, see + [Migrating to large speech + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-migrate). + **See also:** + * [Large speech languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-large-speech-languages) + * [Supported features for large speech + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-large-speech-languages#models-lsm-supported-features) + * [Next-generation languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng) + * [Supported features for next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-features). + + :param str name: A user-defined name for the new custom language model. Use + a localized name that matches the language of the custom model. 
Use a name + that describes the domain of the custom model, such as `Medical custom + model` or `Legal custom model`. Use a name that is unique among all custom + language models that you own. + Include a maximum of 256 characters in the name. Do not use backslashes, + slashes, colons, equal signs, ampersands, or question marks in the name. + :param str base_model_name: The name of the base language model that is to + be customized by the new custom language model. The new custom model can be + used only with the base model that it customizes. + To determine whether a base model supports language model customization, + use the [Get a model](#getmodel) method and check that the attribute + `custom_language_model` is set to `true`. You can also refer to [Language + support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). + :param str dialect: (optional) The dialect of the specified language that + is to be used with the custom language model. _For all languages, it is + always safe to omit this field._ The service automatically uses the + language identifier from the name of the base model. For example, the + service automatically uses `en-US` for all US English models. + If you specify the `dialect` for a new custom model, follow these + guidelines. _For non-Spanish previous-generation models and for + next-generation models,_ you must specify a value that matches the + five-character language identifier from the name of the base model. _For + Spanish previous-generation models,_ you must specify one of the following + values: + * `es-ES` for Castilian Spanish (`es-ES` models) + * `es-LA` for Latin American Spanish (`es-AR`, `es-CL`, `es-CO`, and + `es-PE` models) + * `es-US` for Mexican (North American) Spanish (`es-MX` models) + All values that you pass for the `dialect` field are case-insensitive. + :param str description: (optional) A recommended description of the new + custom language model. Use a localized description that matches the + language of the custom model. Include a maximum of 128 characters in the + description. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
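# A sketch of creating a custom language model. The base model name and description
# are illustrative; the `dialect` field is omitted, which the documentation above
# says is always safe. `speech_to_text` is the authenticated client from the
# earlier sketch.
language_model = speech_to_text.create_language_model(
    'Example medical model',
    'en-US_Telephony',
    description='Example custom model for medical dictation',
).get_result()
customization_id = language_model['customization_id']
print(customization_id)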
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `LanguageModel` object """ if name is None: raise ValueError('name must be provided') if base_model_name is None: raise ValueError('base_model_name must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'create_language_model') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_language_model', + ) headers.update(sdk_headers) data = { 'name': name, 'base_model_name': base_model_name, 'dialect': dialect, - 'description': description + 'description': description, } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' url = '/v1/customizations' - response = self.request( + request = self.prepare_request( method='POST', url=url, headers=headers, - json=data, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def delete_language_model(self, customization_id, **kwargs): + def list_language_models( + self, + *, + language: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ - Delete a custom language model. - - Deletes an existing custom language model. The custom model cannot be deleted if - another request, such as adding a corpus or grammar to the model, is currently - being processed. You must use credentials for the instance of the service that - owns a model to delete it. - **See also:** [Deleting a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-models.html#deleteModel-language). + List custom language models. - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. + Lists information about all custom language models that are owned by an instance + of the service. Use the `language` parameter to see all custom language models for + the specified language. Omit the parameter to see all custom language models for + all languages. You must use credentials for the instance of the service that owns + a model to list information about it. + **See also:** + * [Listing custom language + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageLanguageModels#listModels-language) + * [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). + + :param str language: (optional) The identifier of the language for which + custom language or custom acoustic models are to be returned. Specify the + five-character language identifier; for example, specify `en-US` to see all + custom language or custom acoustic models that are based on US English + models. Omit the parameter to see all custom language or custom acoustic + models that are owned by the requesting credentials. + To determine the languages for which customization is available, see + [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). 
:param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `LanguageModels` object """ - if customization_id is None: - raise ValueError('customization_id must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_language_models', + ) + headers.update(sdk_headers) + + params = { + 'language': language, + } + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'delete_language_model') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/customizations' + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) - url = '/v1/customizations/{0}'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='DELETE', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def get_language_model(self, customization_id, **kwargs): + def get_language_model( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: """ Get a custom language model. Gets information about a specified custom language model. You must use credentials for the instance of the service that owns a model to list information about it. - **See also:** [Listing custom language - models](https://cloud.ibm.com/docs/services/speech-to-text/language-models.html#listModels-language). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. + **See also:** + * [Listing custom language + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageLanguageModels#listModels-language) + * [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `LanguageModel` object """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'get_language_model') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_language_model', + ) headers.update(sdk_headers) - url = '/v1/customizations/{0}'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) - return response - - def list_language_models(self, language=None, **kwargs): - """ - List custom language models. - - Lists information about all custom language models that are owned by an instance - of the service. 
Use the `language` parameter to see all custom language models for - the specified language. Omit the parameter to see all custom language models for - all languages. You must use credentials for the instance of the service that owns - a model to list information about it. - **See also:** [Listing custom language - models](https://cloud.ibm.com/docs/services/speech-to-text/language-models.html#listModels-language). - - :param str language: The identifier of the language for which custom language or - custom acoustic models are to be returned (for example, `en-US`). Omit the - parameter to see all custom language or custom acoustic models that are owned by - the requesting credentials. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'list_language_models') - headers.update(sdk_headers) - - params = {'language': language} - - url = '/v1/customizations' - response = self.request( + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}'.format(**path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, - params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def reset_language_model(self, customization_id, **kwargs): + def delete_language_model( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: """ - Reset a custom language model. - - Resets a custom language model by removing all corpora, grammars, and words from - the model. Resetting a custom language model initializes the model to its state - when it was first created. Metadata such as the name and language of the model are - preserved, but the model's words resource is removed and must be re-created. You - must use credentials for the instance of the service that owns a model to reset - it. - **See also:** [Resetting a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-models.html#resetModel-language). + Delete a custom language model. - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. + Deletes an existing custom language model. The custom model cannot be deleted if + another request, such as adding a corpus or grammar to the model, is currently + being processed. You must use credentials for the instance of the service that + owns a model to delete it. + **See also:** + * [Deleting a custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageLanguageModels#deleteModel-language) + * [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. 
:param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_language_model', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'reset_language_model') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}'.format(**path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + ) - url = '/v1/customizations/{0}/reset'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='POST', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def train_language_model(self, - customization_id, - word_type_to_add=None, - customization_weight=None, - **kwargs): + def train_language_model( + self, + customization_id: str, + *, + word_type_to_add: Optional[str] = None, + customization_weight: Optional[float] = None, + strict: Optional[bool] = None, + force: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ Train a custom language model. @@ -1215,75 +1913,193 @@ def train_language_model(self, complete depending on the amount of data on which the service is being trained and the current load on the service. The method returns an HTTP 200 response code to indicate that the training process has begun. - You can monitor the status of the training by using the **Get a custom language - model** method to poll the model's status. Use a loop to check the status every 10 - seconds. The method returns a `LanguageModel` object that includes `status` and - `progress` fields. A status of `available` means that the custom model is trained - and ready to use. The service cannot accept subsequent training requests or - requests to add new resources until the existing request completes. - Training can fail to start for the following reasons: + You can monitor the status of the training by using the [Get a custom language + model](#getlanguagemodel) method to poll the model's status. Use a loop to check + the status every 10 seconds. If you added custom words directly to a custom model + that is based on a next-generation model, allow for some minutes of extra training + time for the model. + The method returns a `LanguageModel` object that includes `status` and `progress` + fields. A status of `available` means that the custom model is trained and ready + to use. The service cannot accept subsequent training requests or requests to add + new resources until the existing request completes. + For custom models that are based on improved base language models, training also + performs an automatic upgrade to a newer version of the base model. You do not + need to use the [Upgrade a custom language model](#upgradelanguagemodel) method to + perform the upgrade. 
+ **See also:** + * [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support) + * [Train the custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageCreate#trainModel-language) + * [Upgrading custom language models that are based on improved next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-language-ng) + ### Training failures + Training can fail to start for the following reasons: * The service is currently handling another request for the custom model, such as another training request or a request to add a corpus or grammar to the model. * No training data have been added to the custom model. - * One or more words that were added to the custom model have invalid sounds-like - pronunciations that you must fix. - **See also:** [Train the custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-create.html#trainModel-language). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str word_type_to_add: The type of words from the custom language model's - words resource on which to train the model: - * `all` (the default) trains the model on all new words, regardless of whether - they were extracted from corpora or grammars or were added or modified by the - user. - * `user` trains the model only on new words that were added or modified by the - user directly. The model is not trained on new words extracted from corpora or - grammars. - :param float customization_weight: Specifies a customization weight for the custom - language model. The customization weight tells the service how much weight to give - to words from the custom language model compared to those from the base model for - speech recognition. Specify a value between 0.0 and 1.0; the default is 0.3. - The default value yields the best performance in general. Assign a higher value if - your audio makes frequent use of OOV words from the custom model. Use caution when - setting the weight: a higher value can improve the accuracy of phrases from the - custom model's domain, but it can negatively affect performance on non-domain - phrases. - The value that you assign is used for all recognition requests that use the model. - You can override it for any recognition request by specifying a customization - weight for that request. + * The custom model contains one or more invalid corpora, grammars, or words (for + example, a custom word has an invalid sounds-like pronunciation). You can correct + the invalid resources or set the `strict` parameter to `false` to exclude the + invalid resources from the training. The model must contain at least one valid + resource for training to succeed. + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. 
+ :param str word_type_to_add: (optional) _For custom models that are based + on previous-generation models_, the type of words from the custom language + model's words resource on which to train the model: + * `all` (the default) trains the model on all new words, regardless of + whether they were extracted from corpora or grammars or were added or + modified by the user. + * `user` trains the model only on custom words that were added or modified + by the user directly. The model is not trained on new words extracted from + corpora or grammars. + _For custom models that are based on large speech models and + next-generation models_, the service ignores the `word_type_to_add` + parameter. The words resource contains only custom words that the user adds + or modifies directly, so the parameter is unnecessary. + :param float customization_weight: (optional) Specifies a customization + weight for the custom language model. The customization weight tells the + service how much weight to give to words from the custom language model + compared to those from the base model for speech recognition. Specify a + value between 0.0 and 1.0. The default value is: + * 0.5 for large speech models + * 0.3 for previous-generation models + * 0.2 for most next-generation models + * 0.1 for next-generation English and Japanese models + The default value yields the best performance in general. Assign a higher + value if your audio makes frequent use of OOV words from the custom model. + Use caution when setting the weight: a higher value can improve the + accuracy of phrases from the custom model's domain, but it can negatively + affect performance on non-domain phrases. + The value that you assign is used for all recognition requests that use the + model. You can override it for any recognition request by specifying a + customization weight for that request. + See [Using customization + weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). + :param bool strict: (optional) If `false`, allows training of the custom + language model to proceed as long as the model contains at least one valid + resource. The method returns an array of `TrainingWarning` objects that + lists any invalid resources. By default (`true`), training of a custom + language model fails (status code 400) if the model contains one or more + invalid resources (corpus files, grammar files, or custom words). + :param bool force: (optional) If `true`, forces the training of the custom + language model regardless of whether it contains any changes (is in the + `ready` or `available` state). By default (`false`), the model must be in + the `ready` state to be trained. You can use the parameter to train and + thus upgrade a custom model that is based on an improved next-generation + model. *The parameter is available only for IBM Cloud, not for IBM Cloud + Pak for Data.* + See [Upgrading a custom language model based on an improved next-generation + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-language-ng). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
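# A sketch of starting a training run and then polling the model status every
# 10 seconds, as recommended above. `speech_to_text` and `customization_id` come
# from the earlier sketches; the customization weight is illustrative only.
import time

speech_to_text.train_language_model(
    customization_id,
    customization_weight=0.4,
    strict=False,        # proceed as long as at least one resource is valid
)

while True:
    language_model = speech_to_text.get_language_model(customization_id).get_result()
    if language_model['status'] in ('available', 'failed'):
        break
    time.sleep(10)
print(language_model['status'])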
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `TrainingResponse` object """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'train_language_model') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='train_language_model', + ) headers.update(sdk_headers) params = { 'word_type_to_add': word_type_to_add, - 'customization_weight': customization_weight + 'customization_weight': customization_weight, + 'strict': strict, + 'force': force, } - url = '/v1/customizations/{0}/train'.format( - *self._encode_path_vars(customization_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/train'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) + return response + + def reset_language_model( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Reset a custom language model. + + Resets a custom language model by removing all corpora, grammars, and words from + the model. Resetting a custom language model initializes the model to its state + when it was first created. Metadata such as the name and language of the model are + preserved, but the model's words resource is removed and must be re-created. You + must use credentials for the instance of the service that owns a model to reset + it. + **See also:** + * [Resetting a custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageLanguageModels#resetModel-language) + * [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
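# A sketch of resetting a custom model to its just-created state, removing all
# corpora, grammars, and custom words. `speech_to_text` and `customization_id`
# are assumed from the earlier sketches.
speech_to_text.reset_language_model(customization_id)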
+ :rtype: DetailedResponse + """ + + if not customization_id: + raise ValueError('customization_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='reset_language_model', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/reset'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) return response - def upgrade_language_model(self, customization_id, **kwargs): + def upgrade_language_model( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: """ Upgrade a custom language model. @@ -1295,49 +2111,130 @@ def upgrade_language_model(self, customization_id, **kwargs): that owns a model to upgrade it. The method returns an HTTP 200 response code to indicate that the upgrade process has begun successfully. You can monitor the status of the upgrade by using the - **Get a custom language model** method to poll the model's status. The method - returns a `LanguageModel` object that includes `status` and `progress` fields. Use - a loop to check the status every 10 seconds. While it is being upgraded, the - custom model has the status `upgrading`. When the upgrade is complete, the model - resumes the status that it had prior to upgrade. The service cannot accept - subsequent requests for the model until the upgrade completes. - **See also:** [Upgrading a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/custom-upgrade.html#upgradeLanguage). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. + [Get a custom language model](#getlanguagemodel) method to poll the model's + status. The method returns a `LanguageModel` object that includes `status` and + `progress` fields. Use a loop to check the status every 10 seconds. + While it is being upgraded, the custom model has the status `upgrading`. When the + upgrade is complete, the model resumes the status that it had prior to upgrade. + The service cannot accept subsequent requests for the model until the upgrade + completes. + For custom models that are based on improved base language models, the [Train a + custom language model](#trainlanguagemodel) method also performs an automatic + upgrade to a newer version of the base model. You do not need to use the upgrade + method. + **See also:** + * [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support) + * [Upgrading a custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-language) + * [Upgrading custom language models that are based on improved next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-language-ng). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. 
You must make the + request with credentials for the instance of the service that owns the + custom model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='upgrade_language_model', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'upgrade_language_model') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/upgrade_model'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + ) - url = '/v1/customizations/{0}/upgrade_model'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='POST', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response ######################### # Custom corpora ######################### - def add_corpus(self, - customization_id, - corpus_name, - corpus_file, - allow_overwrite=None, - **kwargs): + def list_corpora( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: + """ + List corpora. + + Lists information about all corpora from a custom language model. The information + includes the name, status, and total number of words for each corpus. _For custom + models that are based on previous-generation models_, it also includes the number + of out-of-vocabulary (OOV) words from the corpus. You must use credentials for the + instance of the service that owns a model to list its corpora. + **See also:** [Listing corpora for a custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageCorpora#listCorpora). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
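# A sketch of listing the corpora of a custom model and printing the analysis
# status of each one. `speech_to_text` and `customization_id` are assumed from
# the earlier sketches.
corpora = speech_to_text.list_corpora(customization_id).get_result()
for corpus in corpora['corpora']:
    print(corpus['name'], corpus['status'], corpus['total_words'])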
+ :rtype: DetailedResponse with `dict` result representing a `Corpora` object + """ + + if not customization_id: + raise ValueError('customization_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_corpora', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/corpora'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) + return response + + def add_corpus( + self, + customization_id: str, + corpus_name: str, + corpus_file: BinaryIO, + *, + allow_overwrite: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ Add a corpus. @@ -1345,345 +2242,398 @@ def add_corpus(self, Use multiple requests to submit multiple corpus text files. You must use credentials for the instance of the service that owns a model to add a corpus to it. Adding a corpus does not affect the custom language model until you train the - model for the new data by using the **Train a custom language model** method. + model for the new data by using the [Train a custom language + model](#trainlanguagemodel) method. Submit a plain text file that contains sample sentences from the domain of - interest to enable the service to extract words in context. The more sentences you - add that represent the context in which speakers use words from the domain, the - better the service's recognition accuracy. + interest to enable the service to parse the words in context. The more sentences + you add that represent the context in which speakers use words from the domain, + the better the service's recognition accuracy. The call returns an HTTP 201 response code if the corpus is valid. The service - then asynchronously processes the contents of the corpus and automatically - extracts new words that it finds. This can take on the order of a minute or two to - complete depending on the total number of words and the number of new words in the - corpus, as well as the current load on the service. You cannot submit requests to - add additional resources to the custom model or to train the model until the - service's analysis of the corpus for the current request completes. Use the **List - a corpus** method to check the status of the analysis. - The service auto-populates the model's words resource with words from the corpus - that are not found in its base vocabulary. These are referred to as - out-of-vocabulary (OOV) words. You can use the **List custom words** method to - examine the words resource. You can use other words method to eliminate typos and - modify how words are pronounced as needed. + then asynchronously processes and automatically extracts data from the contents of + the corpus. This operation can take on the order of minutes to complete depending + on the current load on the service, the total number of words in the corpus, and, + _for custom models that are based on previous-generation models_, the number of + new (out-of-vocabulary) words in the corpus. 
You cannot submit requests to add + additional resources to the custom model or to train the model until the service's + analysis of the corpus for the current request completes. Use the [Get a + corpus](#getcorpus) method to check the status of the analysis. + _For custom models that are based on large speech models_, the service parses and + extracts word sequences from one or multiple corpora files. The characters help + the service learn and predict character sequences from audio. + _For custom models that are based on previous-generation models_, the service + auto-populates the model's words resource with words from the corpus that are not + found in its base vocabulary. These words are referred to as out-of-vocabulary + (OOV) words. After adding a corpus, you must validate the words resource to ensure + that each OOV word's definition is complete and valid. You can use the [List + custom words](#listwords) method to examine the words resource. You can use other + words method to eliminate typos and modify how words are pronounced and displayed + as needed. To add a corpus file that has the same name as an existing corpus, set the `allow_overwrite` parameter to `true`; otherwise, the request fails. Overwriting an existing corpus causes the service to process the corpus text file and extract - OOV words anew. Before doing so, it removes any OOV words associated with the - existing corpus from the model's words resource unless they were also added by - another corpus or grammar, or they have been modified in some way with the **Add - custom words** or **Add a custom word** method. + its data anew. _For a custom model that is based on a previous-generation model_, + the service first removes any OOV words that are associated with the existing + corpus from the model's words resource unless they were also added by another + corpus or grammar, or they have been modified in some way with the [Add custom + words](#addwords) or [Add a custom word](#addword) method. The service limits the overall amount of data that you can add to a custom model - to a maximum of 10 million total words from all sources combined. Also, you can - add no more than 30 thousand custom (OOV) words to a model. This includes words - that the service extracts from corpora and grammars, and words that you add - directly. + to a maximum of 10 million total words from all sources combined. _For a custom + model that is based on a previous-generation model_, you can add no more than 90 + thousand custom (OOV) words to a model. This includes words that the service + extracts from corpora and grammars, and words that you add directly. **See also:** - * [Working with - corpora](https://cloud.ibm.com/docs/services/speech-to-text/language-resource.html#workingCorpora) - * [Add corpora to the custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-create.html#addCorpora). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str corpus_name: The name of the new corpus for the custom language model. - Use a localized name that matches the language of the custom model and reflects - the contents of the corpus. - * Include a maximum of 128 characters in the name. - * Do not include spaces, slashes, or backslashes in the name. 
- * Do not use the name of an existing corpus or grammar that is already defined for - the custom model. - * Do not use the name `user`, which is reserved by the service to denote custom - words that are added or modified by the user. - :param file corpus_file: A plain text file that contains the training data for the - corpus. Encode the file in UTF-8 if it contains non-ASCII characters; the service - assumes UTF-8 encoding if it encounters non-ASCII characters. - Make sure that you know the character encoding of the file. You must use that - encoding when working with the words in the custom language model. For more - information, see [Character - encoding](https://cloud.ibm.com/docs/services/speech-to-text/language-resource.html#charEncoding). - With the `curl` command, use the `--data-binary` option to upload the file for the - request. - :param bool allow_overwrite: If `true`, the specified corpus overwrites an - existing corpus with the same name. If `false`, the request fails if a corpus with - the same name already exists. The parameter has no effect if a corpus with the - same name does not already exist. + * [Add a corpus to the custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageCreate#addCorpus) + * [Working with corpora for previous-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#workingCorpora) + * [Working with corpora for large speech models and next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#workingCorpora-ng) + * [Validating a words resource for previous-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#validateModel) + * [Validating a words resource for large speech models and next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#validateModel-ng). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str corpus_name: The name of the new corpus for the custom language + model. Use a localized name that matches the language of the custom model + and reflects the contents of the corpus. + * Include a maximum of 128 characters in the name. + * Do not use characters that need to be URL-encoded. For example, do not + use spaces, slashes, backslashes, colons, ampersands, double quotes, plus + signs, equals signs, questions marks, and so on in the name. (The service + does not prevent the use of these characters. But because they must be + URL-encoded wherever used, their use is strongly discouraged.) + * Do not use the name of an existing corpus or grammar that is already + defined for the custom model. + * Do not use the name `user`, which is reserved by the service to denote + custom words that are added or modified by the user. + * Do not use the name `base_lm` or `default_lm`. Both names are reserved + for future use by the service. + :param BinaryIO corpus_file: A plain text file that contains the training + data for the corpus. Encode the file in UTF-8 if it contains non-ASCII + characters; the service assumes UTF-8 encoding if it encounters non-ASCII + characters. + Make sure that you know the character encoding of the file. You must use + that same encoding when working with the words in the custom language + model. 
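A minimal usage sketch for the add_corpus/get_corpus flow described above, assuming an authenticated SpeechToTextV1 client set up with the usual IAMAuthenticator from ibm_cloud_sdk_core; the API key, service URL, customization ID, corpus name, file name, and the 'analyzed' status value used for polling are illustrative placeholders, not values from this change:

import time

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import SpeechToTextV1

# Placeholder credentials, URL, and customization ID.
authenticator = IAMAuthenticator('{apikey}')
speech_to_text = SpeechToTextV1(authenticator=authenticator)
speech_to_text.set_service_url('{url}')
customization_id = '{customization_id}'

# Submit a UTF-8 plain text corpus; allow_overwrite=True replaces an
# existing corpus that has the same name.
with open('healthcare-corpus.txt', 'rb') as corpus_file:
    speech_to_text.add_corpus(
        customization_id,
        'healthcare-corpus',
        corpus_file,
        allow_overwrite=True,
    )

# Analysis is asynchronous: poll Get a corpus until it completes before
# adding more resources or training the model.
while True:
    corpus = speech_to_text.get_corpus(
        customization_id, 'healthcare-corpus').get_result()
    if corpus['status'] == 'analyzed':
        break
    time.sleep(10)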
For more information, see [Character encoding for custom + words](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageWords#charEncoding). + With the `curl` command, use the `--data-binary` option to upload the file + for the request. + :param bool allow_overwrite: (optional) If `true`, the specified corpus + overwrites an existing corpus with the same name. If `false`, the request + fails if a corpus with the same name already exists. The parameter has no + effect if a corpus with the same name does not already exist. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if corpus_name is None: + if not corpus_name: raise ValueError('corpus_name must be provided') if corpus_file is None: raise ValueError('corpus_file must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'add_corpus') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='add_corpus', + ) headers.update(sdk_headers) - params = {'allow_overwrite': allow_overwrite} + params = { + 'allow_overwrite': allow_overwrite, + } - form_data = {} - form_data['corpus_file'] = (None, corpus_file, 'text/plain') + form_data = [] + form_data.append(('corpus_file', (None, corpus_file, 'text/plain'))) - url = '/v1/customizations/{0}/corpora/{1}'.format( - *self._encode_path_vars(customization_id, corpus_name)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'corpus_name'] + path_param_values = self.encode_path_vars(customization_id, corpus_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/corpora/{corpus_name}'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, files=form_data, - accept_json=True) - return response - - def delete_corpus(self, customization_id, corpus_name, **kwargs): - """ - Delete a corpus. - - Deletes an existing corpus from a custom language model. The service removes any - out-of-vocabulary (OOV) words that are associated with the corpus from the custom - model's words resource unless they were also added by another corpus or grammar, - or they were modified in some way with the **Add custom words** or **Add a custom - word** method. Removing a corpus does not affect the custom model until you train - the model with the **Train a custom language model** method. You must use - credentials for the instance of the service that owns a model to delete its - corpora. - **See also:** [Deleting a corpus from a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-corpora.html#deleteCorpus). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str corpus_name: The name of the corpus for the custom language model. 
- :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if customization_id is None: - raise ValueError('customization_id must be provided') - if corpus_name is None: - raise ValueError('corpus_name must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'delete_corpus') - headers.update(sdk_headers) + ) - url = '/v1/customizations/{0}/corpora/{1}'.format( - *self._encode_path_vars(customization_id, corpus_name)) - response = self.request( - method='DELETE', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def get_corpus(self, customization_id, corpus_name, **kwargs): + def get_corpus( + self, + customization_id: str, + corpus_name: str, + **kwargs, + ) -> DetailedResponse: """ Get a corpus. Gets information about a corpus from a custom language model. The information - includes the total number of words and out-of-vocabulary (OOV) words, name, and - status of the corpus. You must use credentials for the instance of the service - that owns a model to list its corpora. + includes the name, status, and total number of words for the corpus. _For custom + models that are based on previous-generation models_, it also includes the number + of out-of-vocabulary (OOV) words from the corpus. You must use credentials for the + instance of the service that owns a model to list its corpora. **See also:** [Listing corpora for a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-corpora.html#listCorpora). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str corpus_name: The name of the corpus for the custom language model. + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageCorpora#listCorpora). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str corpus_name: The name of the corpus for the custom language + model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Corpus` object """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if corpus_name is None: + if not corpus_name: raise ValueError('corpus_name must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_corpus', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'get_corpus') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'corpus_name'] + path_param_values = self.encode_path_vars(customization_id, corpus_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/corpora/{corpus_name}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) - url = '/v1/customizations/{0}/corpora/{1}'.format( - *self._encode_path_vars(customization_id, corpus_name)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def list_corpora(self, customization_id, **kwargs): + def delete_corpus( + self, + customization_id: str, + corpus_name: str, + **kwargs, + ) -> DetailedResponse: """ - List corpora. - - Lists information about all corpora from a custom language model. The information - includes the total number of words and out-of-vocabulary (OOV) words, name, and - status of each corpus. You must use credentials for the instance of the service - that owns a model to list its corpora. - **See also:** [Listing corpora for a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-corpora.html#listCorpora). + Delete a corpus. - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. + Deletes an existing corpus from a custom language model. Removing a corpus does + not affect the custom model until you train the model with the [Train a custom + language model](#trainlanguagemodel) method. You must use credentials for the + instance of the service that owns a model to delete its corpora. + _For custom models that are based on previous-generation models_, the service + removes any out-of-vocabulary (OOV) words that are associated with the corpus from + the custom model's words resource unless they were also added by another corpus or + grammar, or they were modified in some way with the [Add custom words](#addwords) + or [Add a custom word](#addword) method. + **See also:** [Deleting a corpus from a custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageCorpora#deleteCorpus). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str corpus_name: The name of the corpus for the custom language + model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - + if not corpus_name: + raise ValueError('corpus_name must be provided') headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_corpus', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'list_corpora') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'corpus_name'] + path_param_values = self.encode_path_vars(customization_id, corpus_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/corpora/{corpus_name}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + ) - url = '/v1/customizations/{0}/corpora'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response ######################### # Custom words ######################### - def add_word(self, - customization_id, - word_name, - word=None, - sounds_like=None, - display_as=None, - **kwargs): + def list_words( + self, + customization_id: str, + *, + word_type: Optional[str] = None, + sort: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ - Add a custom word. + List custom words. - Adds a custom word to a custom language model. The service populates the words - resource for a custom model with out-of-vocabulary (OOV) words from each corpus or - grammar that is added to the model. You can use this method to add a word or to - modify an existing word in the words resource. The words resource for a model can - contain a maximum of 30 thousand custom (OOV) words. This includes words that the - service extracts from corpora and grammars and words that you add directly. - You must use credentials for the instance of the service that owns a model to add - or modify a custom word for the model. Adding or modifying a custom word does not - affect the custom model until you train the model for the new data by using the - **Train a custom language model** method. - Use the `word_name` parameter to specify the custom word that is to be added or - modified. Use the `CustomWord` object to provide one or both of the optional - `sounds_like` and `display_as` fields for the word. - * The `sounds_like` field provides an array of one or more pronunciations for the - word. Use the parameter to specify how the word can be pronounced by users. Use - the parameter for words that are difficult to pronounce, foreign words, acronyms, - and so on. For example, you might specify that the word `IEEE` can sound like `i - triple e`. You can specify a maximum of five sounds-like pronunciations for a - word. - * The `display_as` field provides a different way of spelling the word in a - transcript. Use the parameter when you want the word to appear different from its - usual representation or from its spelling in training data. For example, you might - indicate that the word `IBM(trademark)` is to be displayed as `IBM™`. - If you add a custom word that already exists in the words resource for the custom - model, the new definition overwrites the existing data for the word. 
If the - service encounters an error, it does not add the word to the words resource. Use - the **List a custom word** method to review the word that you add. - **See also:** - * [Working with custom - words](https://cloud.ibm.com/docs/services/speech-to-text/language-resource.html#workingWords) - * [Add words to the custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-create.html#addWords). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str word_name: The custom word that is to be added to or updated in the - custom language model. Do not include spaces in the word. Use a `-` (dash) or `_` - (underscore) to connect the tokens of compound words. URL-encode the word if it - includes non-ASCII characters. For more information, see [Character - encoding](https://cloud.ibm.com/docs/services/speech-to-text/language-resource.html#charEncoding). - :param str word: For the **Add custom words** method, you must specify the custom - word that is to be added to or updated in the custom model. Do not include spaces - in the word. Use a `-` (dash) or `_` (underscore) to connect the tokens of - compound words. - Omit this parameter for the **Add a custom word** method. - :param list[str] sounds_like: An array of sounds-like pronunciations for the - custom word. Specify how words that are difficult to pronounce, foreign words, - acronyms, and so on can be pronounced by users. - * For a word that is not in the service's base vocabulary, omit the parameter to - have the service automatically generate a sounds-like pronunciation for the word. - * For a word that is in the service's base vocabulary, use the parameter to - specify additional pronunciations for the word. You cannot override the default - pronunciation of a word; pronunciations you add augment the pronunciation from the - base vocabulary. - A word can have at most five sounds-like pronunciations. A pronunciation can - include at most 40 characters not including spaces. - :param str display_as: An alternative spelling for the custom word when it appears - in a transcript. Use the parameter when you want the word to have a spelling that - is different from its usual representation or from its spelling in corpora - training data. + Lists information about custom words from a custom language model. You can list + all words from the custom model's words resource, only custom words that were + added or modified by the user, or, _for a custom model that is based on a + previous-generation model_, only out-of-vocabulary (OOV) words that were extracted + from corpora or are recognized by grammars. _For a custom model that is based on a + next-generation model_, you can list all words or only those words that were added + directly by a user, which return the same results. + You can also indicate the order in which the service is to return words; by + default, the service lists words in ascending alphabetical order. You must use + credentials for the instance of the service that owns a model to list information + about its words. + **See also:** [Listing words from a custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageWords#listWords). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. 
You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str word_type: (optional) The type of words to be listed from the + custom language model's words resource: + * `all` (the default) shows all words. + * `user` shows only custom words that were added or modified by the user + directly. + * `corpora` shows only OOV that were extracted from corpora. + * `grammars` shows only OOV words that are recognized by grammars. + _For a custom model that is based on a next-generation model_, only `all` + and `user` apply. Both options return the same results. Words from other + sources are not added to custom models that are based on next-generation + models. + :param str sort: (optional) Indicates the order in which the words are to + be listed, `alphabetical` or by `count`. You can prepend an optional `+` or + `-` to an argument to indicate whether the results are to be sorted in + ascending or descending order. By default, words are sorted in ascending + alphabetical order. For alphabetical ordering, the lexicographical + precedence is numeric values, uppercase letters, and lowercase letters. For + count ordering, values with the same count are ordered alphabetically. With + the `curl` command, URL-encode the `+` symbol as `%2B`. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Words` object """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if word_name is None: - raise ValueError('word_name must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'add_word') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_words', + ) headers.update(sdk_headers) - data = { - 'word': word, - 'sounds_like': sounds_like, - 'display_as': display_as + params = { + 'word_type': word_type, + 'sort': sort, } - url = '/v1/customizations/{0}/words/{1}'.format( - *self._encode_path_vars(customization_id, word_name)) - response = self.request( - method='PUT', url=url, headers=headers, json=data, accept_json=True) + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/words'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) return response - def add_words(self, customization_id, words, **kwargs): + def add_words( + self, + customization_id: str, + words: List['CustomWord'], + **kwargs, + ) -> DetailedResponse: """ Add custom words. - Adds one or more custom words to a custom language model. The service populates + Adds one or more custom words to a custom language model. You can use this method + to add words or to modify existing words in a custom model's words resource. 
_For + custom models that are based on previous-generation models_, the service populates the words resource for a custom model with out-of-vocabulary (OOV) words from each - corpus or grammar that is added to the model. You can use this method to add - additional words or to modify existing words in the words resource. The words - resource for a model can contain a maximum of 30 thousand custom (OOV) words. This + corpus or grammar that is added to the model. You can use this method to modify + OOV words in the model's words resource. + _For a custom model that is based on a previous-generation model_, the words + resource for a model can contain a maximum of 90 thousand custom (OOV) words. This includes words that the service extracts from corpora and grammars and words that you add directly. You must use credentials for the instance of the service that owns a model to add or modify custom words for the model. Adding or modifying custom words does not affect the custom model until you train the model for the new data by using the - **Train a custom language model** method. + [Train a custom language model](#trainlanguagemodel) method. You add custom words by providing a `CustomWords` object, which is an array of - `CustomWord` objects, one per word. You must use the object's `word` parameter to - identify the word that is to be added. You can also provide one or both of the - optional `sounds_like` and `display_as` fields for each word. - * The `sounds_like` field provides an array of one or more pronunciations for the - word. Use the parameter to specify how the word can be pronounced by users. Use - the parameter for words that are difficult to pronounce, foreign words, acronyms, - and so on. For example, you might specify that the word `IEEE` can sound like `i - triple e`. You can specify a maximum of five sounds-like pronunciations for a - word. + `CustomWord` objects, one per word. Use the object's `word` parameter to identify + the word that is to be added. You can also provide one or both of the optional + `display_as` or `sounds_like` fields for each word. * The `display_as` field provides a different way of spelling the word in a transcript. Use the parameter when you want the word to appear different from its usual representation or from its spelling in training data. For example, you might - indicate that the word `IBM(trademark)` is to be displayed as `IBM™`. + indicate that the word `IBM` is to be displayed as `IBM™`. + * The `sounds_like` field provides an array of one or more pronunciations for the + word. Use the parameter to specify how the word can be pronounced by users. Use + the parameter for words that are difficult to pronounce, foreign words, acronyms, + and so on. For example, you might specify that the word `IEEE` can sound like `I + triple E`. You can specify a maximum of five sounds-like pronunciations for a + word. _For a custom model that is based on a previous-generation model_, if you + omit the `sounds_like` field, the service attempts to set the field to its + pronunciation of the word. It cannot generate a pronunciation for all words, so + you must review the word's definition to ensure that it is complete and valid. + * The `mapping_only` field provides parameter for custom words. You can use the + 'mapping_only' key in custom words as a form of post processing. 
This key + parameter has a boolean value to determine whether 'sounds_like' (for non-Japanese + models) or word (for Japanese) is not used for the model fine-tuning, but for the + replacement for 'display_as'. This feature helps you when you use custom words + exclusively to map 'sounds_like' (or word) to 'display_as' value. When you use + custom words solely for post-processing purposes that does not need fine-tuning. If you add a custom word that already exists in the words resource for the custom model, the new definition overwrites the existing data for the word. If the service encounters an error with the input data, it returns a failure code and @@ -1692,101 +2642,245 @@ def add_words(self, customization_id, words, **kwargs): asynchronously processes the words to add them to the model's words resource. The time that it takes for the analysis to complete depends on the number of new words that you add but is generally faster than adding a corpus or grammar. - You can monitor the status of the request by using the **List a custom language - model** method to poll the model's status. Use a loop to check the status every 10 - seconds. The method returns a `Customization` object that includes a `status` - field. A status of `ready` means that the words have been added to the custom - model. The service cannot accept requests to add new data or to train the model - until the existing request completes. - You can use the **List custom words** or **List a custom word** method to review - the words that you add. Words with an invalid `sounds_like` field include an - `error` field that describes the problem. You can use other words-related methods - to correct errors, eliminate typos, and modify how words are pronounced as needed. + You can monitor the status of the request by using the [Get a custom language + model](#getlanguagemodel) method to poll the model's status. Use a loop to check + the status every 10 seconds. The method returns a `Customization` object that + includes a `status` field. A status of `ready` means that the words have been + added to the custom model. The service cannot accept requests to add new data or + to train the model until the existing request completes. + You can use the [List custom words](#listwords) or [Get a custom word](#getword) + method to review the words that you add. Words with an invalid `sounds_like` field + include an `error` field that describes the problem. You can use other + words-related methods to correct errors, eliminate typos, and modify how words are + pronounced as needed. **See also:** - * [Working with custom - words](https://cloud.ibm.com/docs/services/speech-to-text/language-resource.html#workingWords) * [Add words to the custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-create.html#addWords). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param list[CustomWord] words: An array of `CustomWord` objects that provides - information about each custom word that is to be added to or updated in the custom - language model. 
+ model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageCreate#addWords) + * [Working with custom words for previous-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#workingWords) + * [Working with custom words for large speech models and next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#workingWords-ng) + * [Validating a words resource for previous-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#validateModel) + * [Validating a words resource for large speech models and next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#validateModel-ng). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param List[CustomWord] words: An array of `CustomWord` objects that + provides information about each custom word that is to be added to or + updated in the custom language model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') if words is None: raise ValueError('words must be provided') - words = [self._convert_model(x, CustomWord) for x in words] - + words = [convert_model(x) for x in words] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'add_words') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='add_words', + ) headers.update(sdk_headers) - data = {'words': words} + data = { + 'words': words, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/customizations/{0}/words'.format( - *self._encode_path_vars(customization_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/words'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, - json=data, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def delete_word(self, customization_id, word_name, **kwargs): + def add_word( + self, + customization_id: str, + word_name: str, + *, + word: Optional[str] = None, + mapping_only: Optional[List[str]] = None, + sounds_like: Optional[List[str]] = None, + display_as: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ - Delete a custom word. - - Deletes a custom word from a custom language model. You can remove any word that - you added to the custom model's words resource via any means. However, if the word - also exists in the service's base vocabulary, the service removes only the custom - pronunciation for the word; the word remains in the base vocabulary. 
Removing a - custom word does not affect the custom model until you train the model with the - **Train a custom language model** method. You must use credentials for the - instance of the service that owns a model to delete its words. - **See also:** [Deleting a word from a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-words.html#deleteWord). + Add a custom word. - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str word_name: The custom word that is to be deleted from the custom - language model. URL-encode the word if it includes non-ASCII characters. For more - information, see [Character - encoding](https://cloud.ibm.com/docs/services/speech-to-text/language-resource.html#charEncoding). + Adds a custom word to a custom language model. You can use this method to add a + word or to modify an existing word in the words resource. _For custom models that + are based on previous-generation models_, the service populates the words resource + for a custom model with out-of-vocabulary (OOV) words from each corpus or grammar + that is added to the model. You can use this method to modify OOV words in the + model's words resource. + _For a custom model that is based on a previous-generation models_, the words + resource for a model can contain a maximum of 90 thousand custom (OOV) words. This + includes words that the service extracts from corpora and grammars and words that + you add directly. + You must use credentials for the instance of the service that owns a model to add + or modify a custom word for the model. Adding or modifying a custom word does not + affect the custom model until you train the model for the new data by using the + [Train a custom language model](#trainlanguagemodel) method. + Use the `word_name` parameter to specify the custom word that is to be added or + modified. Use the `CustomWord` object to provide one or both of the optional + `display_as` or `sounds_like` fields for the word. + * The `display_as` field provides a different way of spelling the word in a + transcript. Use the parameter when you want the word to appear different from its + usual representation or from its spelling in training data. For example, you might + indicate that the word `IBM` is to be displayed as `IBM™`. + * The `sounds_like` field provides an array of one or more pronunciations for the + word. Use the parameter to specify how the word can be pronounced by users. Use + the parameter for words that are difficult to pronounce, foreign words, acronyms, + and so on. For example, you might specify that the word `IEEE` can sound like `i + triple e`. You can specify a maximum of five sounds-like pronunciations for a + word. _For custom models that are based on previous-generation models_, if you + omit the `sounds_like` field, the service attempts to set the field to its + pronunciation of the word. It cannot generate a pronunciation for all words, so + you must review the word's definition to ensure that it is complete and valid. + If you add a custom word that already exists in the words resource for the custom + model, the new definition overwrites the existing data for the word. If the + service encounters an error, it does not add the word to the words resource. Use + the [Get a custom word](#getword) method to review the word that you add. 
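To make the word, sounds_like, and display_as fields concrete, here is a hedged sketch that reuses the placeholder client and customization_id from the corpus sketch above; the words, pronunciations, and the keyword-style CustomWord constructor are illustrative assumptions rather than part of this change:

from ibm_watson.speech_to_text_v1 import CustomWord

# Add or update a single word; the docstring's own example is `IEEE`
# pronounced as `I triple E`.
speech_to_text.add_word(
    customization_id,
    'IEEE',
    sounds_like=['I triple E'],
    display_as='IEEE',
)

# Add several words in one request; the service processes them
# asynchronously, so poll the model status before training.
speech_to_text.add_words(
    customization_id,
    [
        CustomWord(word='HHonors', sounds_like=['hilton honors', 'H honors']),
        CustomWord(word='NCAA', sounds_like=['N C A A', 'N C double A']),
    ],
)

# Review a definition; an invalid sounds_like value is reported in the
# word's 'error' field.
print(speech_to_text.get_word(customization_id, 'NCAA').get_result())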
+ **See also:** + * [Add words to the custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageCreate#addWords) + * [Working with custom words for previous-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#workingWords) + * [Working with custom words for large speech models and next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#workingWords-ng) + * [Validating a words resource for previous-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#validateModel) + * [Validating a words resource for large speech models and next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#validateModel-ng). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str word_name: The custom word that is to be added to or updated in + the custom language model. Do not use characters that need to be + URL-encoded, for example, spaces, slashes, backslashes, colons, ampersands, + double quotes, plus signs, equals signs, or question marks. Use a `-` + (dash) or `_` (underscore) to connect the tokens of compound words. + URL-encode the word if it includes non-ASCII characters. For more + information, see [Character + encoding](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#charEncoding). + :param str word: (optional) For the [Add custom words](#addwords) method, + you must specify the custom word that is to be added to or updated in the + custom model. Do not use characters that need to be URL-encoded, for + example, spaces, slashes, backslashes, colons, ampersands, double quotes, + plus signs, equals signs, or question marks. Use a `-` (dash) or `_` + (underscore) to connect the tokens of compound words. A Japanese custom + word can include at most 25 characters, not including leading or trailing + spaces. + Omit this parameter for the [Add a custom word](#addword) method. + :param List[str] mapping_only: (optional) Parameter for custom words. You + can use the 'mapping_only' key in custom words as a form of post + processing. This key parameter has a boolean value that determines whether + 'sounds_like' (for non-Japanese models) or the word (for Japanese) is used + not for model fine-tuning but as the replacement for 'display_as'. This + feature helps when you use custom words exclusively to map 'sounds_like' + (or the word) to the 'display_as' value, that is, when you use custom words + solely for post-processing and do not need fine-tuning. + :param List[str] sounds_like: (optional) An array of sounds-like + pronunciations for the custom word. Specify how words that are difficult to + pronounce, foreign words, acronyms, and so on can be pronounced by users. + * _For custom models that are based on previous-generation models_, for a + word that is not in the service's base vocabulary, omit the parameter to + have the service automatically generate a sounds-like pronunciation for the + word. + * For a word that is in the service's base vocabulary, use the parameter to + specify additional pronunciations for the word. You cannot override the + default pronunciation of a word; pronunciations you add augment the + pronunciation from the base vocabulary.
+ A word can have at most five sounds-like pronunciations. A pronunciation + can include at most 40 characters, not including leading or trailing + spaces. A Japanese pronunciation can include at most 25 characters, not + including leading or trailing spaces. + :param str display_as: (optional) An alternative spelling for the custom + word when it appears in a transcript. Use the parameter when you want the + word to have a spelling that is different from its usual representation or + from its spelling in corpora training data. + _For custom models that are based on next-generation models_, the service + uses the spelling of the word as the display-as value if you omit the + field. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if word_name is None: + if not word_name: raise ValueError('word_name must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='add_word', + ) + headers.update(sdk_headers) + + data = { + 'word': word, + 'mapping_only': mapping_only, + 'sounds_like': sounds_like, + 'display_as': display_as, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'delete_word') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'word_name'] + path_param_values = self.encode_path_vars(customization_id, word_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/words/{word_name}'.format( + **path_param_dict) + request = self.prepare_request( + method='PUT', + url=url, + headers=headers, + data=data, + ) - url = '/v1/customizations/{0}/words/{1}'.format( - *self._encode_path_vars(customization_id, word_name)) - response = self.request( - method='DELETE', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def get_word(self, customization_id, word_name, **kwargs): + def get_word( + self, + customization_id: str, + word_name: str, + **kwargs, + ) -> DetailedResponse: """ Get a custom word. @@ -1794,105 +2888,186 @@ def get_word(self, customization_id, word_name, **kwargs): credentials for the instance of the service that owns a model to list information about its words. **See also:** [Listing words from a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-words.html#listWords). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str word_name: The custom word that is to be read from the custom language - model. URL-encode the word if it includes non-ASCII characters. For more - information, see [Character - encoding](https://cloud.ibm.com/docs/services/speech-to-text/language-resource.html#charEncoding). + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageWords#listWords). 
+ + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str word_name: The custom word that is to be read from the custom + language model. URL-encode the word if it includes non-ASCII characters. + For more information, see [Character + encoding](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#charEncoding). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Word` object """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if word_name is None: + if not word_name: raise ValueError('word_name must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_word', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'get_word') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'word_name'] + path_param_values = self.encode_path_vars(customization_id, word_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/words/{word_name}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) - url = '/v1/customizations/{0}/words/{1}'.format( - *self._encode_path_vars(customization_id, word_name)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def list_words(self, customization_id, word_type=None, sort=None, **kwargs): + def delete_word( + self, + customization_id: str, + word_name: str, + **kwargs, + ) -> DetailedResponse: """ - List custom words. + Delete a custom word. - Lists information about custom words from a custom language model. You can list - all words from the custom model's words resource, only custom words that were - added or modified by the user, or only out-of-vocabulary (OOV) words that were - extracted from corpora or are recognized by grammars. You can also indicate the - order in which the service is to return words; by default, the service lists words - in ascending alphabetical order. You must use credentials for the instance of the - service that owns a model to list information about its words. - **See also:** [Listing words from a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/language-words.html#listWords). + Deletes a custom word from a custom language model. You can remove any word that + you added to the custom model's words resource via any means. However, if the word + also exists in the service's base vocabulary, the service removes the word only + from the words resource; the word remains in the base vocabulary. Removing a + custom word does not affect the custom model until you train the model with the + [Train a custom language model](#trainlanguagemodel) method. You must use + credentials for the instance of the service that owns a model to delete its words. 
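A short sketch of reviewing and cleaning up the words resource, combining the word_type and sort parameters described for the List custom words method with delete_word; it again assumes the placeholder client and customization_id from the sketches above, and 'HHonors' is simply the illustrative word added earlier:

# List only user-added words, sorted by descending count.
words = speech_to_text.list_words(
    customization_id,
    word_type='user',
    sort='-count',
).get_result()
for word in words['words']:
    print(word['word'], word.get('count'), word.get('error'))

# Remove a word; as with other edits, the change is not reflected in
# recognition until the model is retrained.
speech_to_text.delete_word(customization_id, 'HHonors')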
+ **See also:** [Deleting a word from a custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageWords#deleteWord). - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str word_type: The type of words to be listed from the custom language - model's words resource: - * `all` (the default) shows all words. - * `user` shows only custom words that were added or modified by the user directly. - * `corpora` shows only OOV that were extracted from corpora. - * `grammars` shows only OOV words that are recognized by grammars. - :param str sort: Indicates the order in which the words are to be listed, - `alphabetical` or by `count`. You can prepend an optional `+` or `-` to an - argument to indicate whether the results are to be sorted in ascending or - descending order. By default, words are sorted in ascending alphabetical order. - For alphabetical ordering, the lexicographical precedence is numeric values, - uppercase letters, and lowercase letters. For count ordering, values with the same - count are ordered alphabetically. With the `curl` command, URL encode the `+` - symbol as `%2B`. + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str word_name: The custom word that is to be deleted from the custom + language model. URL-encode the word if it includes non-ASCII characters. + For more information, see [Character + encoding](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#charEncoding). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - + if not word_name: + raise ValueError('word_name must be provided') headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'list_words') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_word', + ) headers.update(sdk_headers) - params = {'word_type': word_type, 'sort': sort} - - url = '/v1/customizations/{0}/words'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='GET', + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'word_name'] + path_param_values = self.encode_path_vars(customization_id, word_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/words/{word_name}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', url=url, headers=headers, - params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response ######################### # Custom grammars ######################### - def add_grammar(self, - customization_id, - grammar_name, - grammar_file, - content_type, - allow_overwrite=None, - **kwargs): + def list_grammars( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: + """ + List grammars. + + Lists information about all grammars from a custom language model. For each + grammar, the information includes the name, status, and (for grammars that are + based on previous-generation models) the total number of out-of-vocabulary (OOV) + words. You must use credentials for the instance of the service that owns a model + to list its grammars. + **See also:** + * [Listing grammars from a custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageGrammars#listGrammars) + * [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `Grammars` object + """ + + if not customization_id: + raise ValueError('customization_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_grammars', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/grammars'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) + return response + + def add_grammar( + self, + customization_id: str, + grammar_name: str, + grammar_file: BinaryIO, + content_type: str, + *, + allow_overwrite: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ Add a grammar. @@ -1900,218 +3075,272 @@ def add_grammar(self, UTF-8 format that defines the grammar. Use multiple requests to submit multiple grammar files. You must use credentials for the instance of the service that owns a model to add a grammar to it. Adding a grammar does not affect the custom - language model until you train the model for the new data by using the **Train a - custom language model** method. + language model until you train the model for the new data by using the [Train a + custom language model](#trainlanguagemodel) method. The call returns an HTTP 201 response code if the grammar is valid. The service then asynchronously processes the contents of the grammar and automatically - extracts new words that it finds. This can take a few seconds to complete - depending on the size and complexity of the grammar, as well as the current load - on the service. You cannot submit requests to add additional resources to the - custom model or to train the model until the service's analysis of the grammar for - the current request completes. Use the **Get a grammar** method to check the - status of the analysis. - The service populates the model's words resource with any word that is recognized - by the grammar that is not found in the model's base vocabulary. These are - referred to as out-of-vocabulary (OOV) words. You can use the **List custom - words** method to examine the words resource and use other words-related methods - to eliminate typos and modify how words are pronounced as needed. + extracts new words that it finds. This operation can take a few seconds or minutes + to complete depending on the size and complexity of the grammar, as well as the + current load on the service. You cannot submit requests to add additional + resources to the custom model or to train the model until the service's analysis + of the grammar for the current request completes. Use the [Get a + grammar](#getgrammar) method to check the status of the analysis. + _For grammars that are based on previous-generation models,_ the service populates + the model's words resource with any word that is recognized by the grammar that is + not found in the model's base vocabulary. These are referred to as + out-of-vocabulary (OOV) words. You can use the [List custom words](#listwords) + method to examine the words resource and use other words-related methods to + eliminate typos and modify how words are pronounced as needed. 
_For grammars that + are based on next-generation models,_ the service extracts no OOV words from the + grammars. To add a grammar that has the same name as an existing grammar, set the `allow_overwrite` parameter to `true`; otherwise, the request fails. Overwriting an existing grammar causes the service to process the grammar file and extract OOV words anew. Before doing so, it removes any OOV words associated with the existing grammar from the model's words resource unless they were also added by another - resource or they have been modified in some way with the **Add custom words** or - **Add a custom word** method. - The service limits the overall amount of data that you can add to a custom model - to a maximum of 10 million total words from all sources combined. Also, you can - add no more than 30 thousand OOV words to a model. This includes words that the - service extracts from corpora and grammars and words that you add directly. + resource or they have been modified in some way with the [Add custom + words](#addwords) or [Add a custom word](#addword) method. + _For grammars that are based on previous-generation models,_ the service limits + the overall amount of data that you can add to a custom model to a maximum of 10 + million total words from all sources combined. Also, you can add no more than 90 + thousand OOV words to a model. This includes words that the service extracts from + corpora and grammars and words that you add directly. **See also:** - * [Working with grammars](https://cloud.ibm.com/docs/services/speech-to-text/) - * [Add grammars to the custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str grammar_name: The name of the new grammar for the custom language - model. Use a localized name that matches the language of the custom model and - reflects the contents of the grammar. - * Include a maximum of 128 characters in the name. - * Do not include spaces, slashes, or backslashes in the name. - * Do not use the name of an existing grammar or corpus that is already defined for - the custom model. - * Do not use the name `user`, which is reserved by the service to denote custom - words that are added or modified by the user. - :param str grammar_file: A plain text file that contains the grammar in the format - specified by the `Content-Type` header. Encode the file in UTF-8 (ASCII is a - subset of UTF-8). Using any other encoding can lead to issues when compiling the - grammar or to unexpected results in decoding. The service ignores an encoding that - is specified in the header of the grammar. + * [Understanding + grammars](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUnderstand#grammarUnderstand) + * [Add a grammar to the custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarAdd#addGrammar) + * [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str grammar_name: The name of the new grammar for the custom + language model. 
Use a localized name that matches the language of the
+ custom model and reflects the contents of the grammar.
+ * Include a maximum of 128 characters in the name.
+ * Do not use characters that need to be URL-encoded. For example, do not
+ use spaces, slashes, backslashes, colons, ampersands, double quotes, plus
+ signs, equals signs, question marks, and so on in the name. (The service
+ does not prevent the use of these characters. But because they must be
+ URL-encoded wherever used, their use is strongly discouraged.)
+ * Do not use the name of an existing grammar or corpus that is already
+ defined for the custom model.
+ * Do not use the name `user`, which is reserved by the service to denote
+ custom words that are added or modified by the user.
+ * Do not use the name `base_lm` or `default_lm`. Both names are reserved
+ for future use by the service.
+ :param BinaryIO grammar_file: A plain text file that contains the grammar
+ in the format specified by the `Content-Type` header. Encode the file in
+ UTF-8 (ASCII is a subset of UTF-8). Using any other encoding can lead to
+ issues when compiling the grammar or to unexpected results in decoding. The
+ service ignores an encoding that is specified in the header of the grammar.
+ With the `curl` command, use the `--data-binary` option to upload the file
+ for the request.
 :param str content_type: The format (MIME type) of the grammar file:
- * `application/srgs` for Augmented Backus-Naur Form (ABNF), which uses a
- plain-text representation that is similar to traditional BNF grammars.
- * `application/srgs+xml` for XML Form, which uses XML elements to represent the
- grammar.
- :param bool allow_overwrite: If `true`, the specified grammar overwrites an
- existing grammar with the same name. If `false`, the request fails if a grammar
- with the same name already exists. The parameter has no effect if a grammar with
- the same name does not already exist.
+ * `application/srgs` for Augmented Backus-Naur Form (ABNF), which uses a
+ plain-text representation that is similar to traditional BNF grammars.
+ * `application/srgs+xml` for XML Form, which uses XML elements to represent
+ the grammar.
+ :param bool allow_overwrite: (optional) If `true`, the specified grammar
+ overwrites an existing grammar with the same name. If `false`, the request
+ fails if a grammar with the same name already exists. The parameter has no
+ effect if a grammar with the same name does not already exist.
 :param dict headers: A `dict` containing the request headers
 :return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if grammar_name is None: + if not grammar_name: raise ValueError('grammar_name must be provided') if grammar_file is None: raise ValueError('grammar_file must be provided') - if content_type is None: + if not content_type: raise ValueError('content_type must be provided') - - headers = {'Content-Type': content_type} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'add_grammar') + headers = { + 'Content-Type': content_type, + } + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='add_grammar', + ) headers.update(sdk_headers) - params = {'allow_overwrite': allow_overwrite} + params = { + 'allow_overwrite': allow_overwrite, + } data = grammar_file - url = '/v1/customizations/{0}/grammars/{1}'.format( - *self._encode_path_vars(customization_id, grammar_name)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'grammar_name'] + path_param_values = self.encode_path_vars(customization_id, + grammar_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/grammars/{grammar_name}'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, data=data, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def delete_grammar(self, customization_id, grammar_name, **kwargs): + def get_grammar( + self, + customization_id: str, + grammar_name: str, + **kwargs, + ) -> DetailedResponse: """ - Delete a grammar. + Get a grammar. - Deletes an existing grammar from a custom language model. The service removes any - out-of-vocabulary (OOV) words associated with the grammar from the custom model's - words resource unless they were also added by another resource or they were - modified in some way with the **Add custom words** or **Add a custom word** - method. Removing a grammar does not affect the custom model until you train the - model with the **Train a custom language model** method. You must use credentials - for the instance of the service that owns a model to delete its grammar. - **See also:** [Deleting a grammar from a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str grammar_name: The name of the grammar for the custom language model. + Gets information about a grammar from a custom language model. For each grammar, + the information includes the name, status, and (for grammars that are based on + previous-generation models) the total number of out-of-vocabulary (OOV) words. You + must use credentials for the instance of the service that owns a model to list its + grammars. + **See also:** + * [Listing grammars from a custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageGrammars#listGrammars) + * [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). 
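# Illustrative usage sketch for the add_grammar signature above: upload a grammar
# and poll get_grammar until the service finishes analyzing it. The API key,
# service URL, customization GUID, grammar name, and file name are placeholder
# values chosen for this example, not values defined by the SDK.
import time

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import SpeechToTextV1

speech_to_text = SpeechToTextV1(authenticator=IAMAuthenticator('{apikey}'))
speech_to_text.set_service_url('{service_url}')

with open('confirm.abnf', 'rb') as grammar_file:
    speech_to_text.add_grammar(
        customization_id='{customization_id}',
        grammar_name='confirm-abnf',
        grammar_file=grammar_file,
        content_type='application/srgs',
        allow_overwrite=True,
    )

# The analysis is asynchronous; check its status every few seconds.
while True:
    grammar = speech_to_text.get_grammar(
        customization_id='{customization_id}',
        grammar_name='confirm-abnf',
    ).get_result()
    if grammar['status'] != 'being_processed':
        break
    time.sleep(10)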
+ + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str grammar_name: The name of the grammar for the custom language + model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Grammar` object """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if grammar_name is None: + if not grammar_name: raise ValueError('grammar_name must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_grammar', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'delete_grammar') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'grammar_name'] + path_param_values = self.encode_path_vars(customization_id, + grammar_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/grammars/{grammar_name}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) - url = '/v1/customizations/{0}/grammars/{1}'.format( - *self._encode_path_vars(customization_id, grammar_name)) - response = self.request( - method='DELETE', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def get_grammar(self, customization_id, grammar_name, **kwargs): + def delete_grammar( + self, + customization_id: str, + grammar_name: str, + **kwargs, + ) -> DetailedResponse: """ - Get a grammar. + Delete a grammar. - Gets information about a grammar from a custom language model. The information - includes the total number of out-of-vocabulary (OOV) words, name, and status of - the grammar. You must use credentials for the instance of the service that owns a - model to list its grammars. - **See also:** [Listing grammars from a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str grammar_name: The name of the grammar for the custom language model. + Deletes an existing grammar from a custom language model. _For grammars that are + based on previous-generation models,_ the service removes any out-of-vocabulary + (OOV) words associated with the grammar from the custom model's words resource + unless they were also added by another resource or they were modified in some way + with the [Add custom words](#addwords) or [Add a custom word](#addword) method. + Removing a grammar does not affect the custom model until you train the model with + the [Train a custom language model](#trainlanguagemodel) method. You must use + credentials for the instance of the service that owns a model to delete its + grammar. 
+ **See also:** + * [Deleting a grammar from a custom language + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageGrammars#deleteGrammar) + * [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). + + :param str customization_id: The customization ID (GUID) of the custom + language model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str grammar_name: The name of the grammar for the custom language + model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if grammar_name is None: + if not grammar_name: raise ValueError('grammar_name must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'get_grammar') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_grammar', + ) headers.update(sdk_headers) - url = '/v1/customizations/{0}/grammars/{1}'.format( - *self._encode_path_vars(customization_id, grammar_name)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) - return response - - def list_grammars(self, customization_id, **kwargs): - """ - List grammars. - - Lists information about all grammars from a custom language model. The information - includes the total number of out-of-vocabulary (OOV) words, name, and status of - each grammar. You must use credentials for the instance of the service that owns a - model to list its grammars. - **See also:** [Listing grammars from a custom language - model](https://cloud.ibm.com/docs/services/speech-to-text/). - - :param str customization_id: The customization ID (GUID) of the custom language - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if customization_id is None: - raise ValueError('customization_id must be provided') - - headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'list_grammars') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'grammar_name'] + path_param_values = self.encode_path_vars(customization_id, + grammar_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/grammars/{grammar_name}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + ) - url = '/v1/customizations/{0}/grammars'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response ######################### # Custom acoustic models ######################### - def create_acoustic_model(self, - name, - base_model_name, - description=None, - **kwargs): + def create_acoustic_model( + self, + name: str, + base_model_name: str, + *, + description: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ Create a custom acoustic model. @@ -2119,201 +3348,261 @@ def create_acoustic_model(self, acoustic model can be used only with the base model for which it is created. The model is owned by the instance of the service whose credentials are used to create it. + You can create a maximum of 1024 custom acoustic models per owning credentials. + The service returns an error if you attempt to create more than 1024 models. You + do not lose any models, but you cannot create any more until your model count is + below the limit. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for large speech models and + next-generation models. + **Important:** Effective **31 July 2023**, all previous-generation models will be + removed from the service and the documentation. Most previous-generation models + were deprecated on 15 March 2022. You must migrate to the equivalent large speech + model or next-generation model by 31 July 2023. For more information, see + [Migrating to large speech + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-migrate). **See also:** [Create a custom acoustic - model](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-create.html#createModel-acoustic). - - :param str name: A user-defined name for the new custom acoustic model. Use a name - that is unique among all custom acoustic models that you own. Use a localized name - that matches the language of the custom model. Use a name that describes the - acoustic environment of the custom model, such as `Mobile custom model` or `Noisy - car custom model`. - :param str base_model_name: The name of the base language model that is to be - customized by the new custom acoustic model. The new custom model can be used only - with the base model that it customizes. - To determine whether a base model supports acoustic model customization, refer to - [Language support for - customization](https://cloud.ibm.com/docs/services/speech-to-text/custom.html#languageSupport). - :param str description: A description of the new custom acoustic model. Use a - localized description that matches the language of the custom model. 
+ model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acoustic#createModel-acoustic). + + :param str name: A user-defined name for the new custom acoustic model. Use + a localized name that matches the language of the custom model. Use a name + that describes the acoustic environment of the custom model, such as + `Mobile custom model` or `Noisy car custom model`. Use a name that is + unique among all custom acoustic models that you own. + Include a maximum of 256 characters in the name. Do not use backslashes, + slashes, colons, equal signs, ampersands, or question marks in the name. + :param str base_model_name: The name of the base language model that is to + be customized by the new custom acoustic model. The new custom model can be + used only with the base model that it customizes. + To determine whether a base model supports acoustic model customization, + refer to [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). + :param str description: (optional) A recommended description of the new + custom acoustic model. Use a localized description that matches the + language of the custom model. Include a maximum of 128 characters in the + description. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `AcousticModel` object """ if name is None: raise ValueError('name must be provided') if base_model_name is None: raise ValueError('base_model_name must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'create_acoustic_model') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_acoustic_model', + ) headers.update(sdk_headers) data = { 'name': name, 'base_model_name': base_model_name, - 'description': description + 'description': description, } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' url = '/v1/acoustic_customizations' - response = self.request( + request = self.prepare_request( method='POST', url=url, headers=headers, - json=data, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def delete_acoustic_model(self, customization_id, **kwargs): + def list_acoustic_models( + self, + *, + language: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ - Delete a custom acoustic model. - - Deletes an existing custom acoustic model. The custom model cannot be deleted if - another request, such as adding an audio resource to the model, is currently being - processed. You must use credentials for the instance of the service that owns a - model to delete it. - **See also:** [Deleting a custom acoustic - model](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-models.html#deleteModel-acoustic). + List custom acoustic models. - :param str customization_id: The customization ID (GUID) of the custom acoustic - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. 
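# A minimal sketch of the create_acoustic_model call defined above. It assumes an
# authenticated SpeechToTextV1 client named `speech_to_text` (as in the grammar
# example earlier); the model name, base model, and description are illustrative.
acoustic_model = speech_to_text.create_acoustic_model(
    name='Example acoustic model',
    base_model_name='en-US_BroadbandModel',
    description='Acoustic model for noisy mobile audio',
).get_result()
print(acoustic_model['customization_id'])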
+ Lists information about all custom acoustic models that are owned by an instance + of the service. Use the `language` parameter to see all custom acoustic models for + the specified language. Omit the parameter to see all custom acoustic models for + all languages. You must use credentials for the instance of the service that owns + a model to list information about it. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for large speech models and + next-generation models. + **See also:** [Listing custom acoustic + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAcousticModels#listModels-acoustic). + + :param str language: (optional) The identifier of the language for which + custom language or custom acoustic models are to be returned. Specify the + five-character language identifier; for example, specify `en-US` to see all + custom language or custom acoustic models that are based on US English + models. Omit the parameter to see all custom language or custom acoustic + models that are owned by the requesting credentials. + To determine the languages for which customization is available, see + [Language support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `AcousticModels` object """ - if customization_id is None: - raise ValueError('customization_id must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_acoustic_models', + ) + headers.update(sdk_headers) + + params = { + 'language': language, + } + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'delete_acoustic_model') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/acoustic_customizations' + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) - url = '/v1/acoustic_customizations/{0}'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='DELETE', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def get_acoustic_model(self, customization_id, **kwargs): + def get_acoustic_model( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: """ Get a custom acoustic model. Gets information about a specified custom acoustic model. You must use credentials for the instance of the service that owns a model to list information about it. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for large speech models and + next-generation models. **See also:** [Listing custom acoustic - models](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-models.html#listModels-acoustic). + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAcousticModels#listModels-acoustic). - :param str customization_id: The customization ID (GUID) of the custom acoustic - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. 
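# A short sketch of list_acoustic_models as defined above, filtering by language.
# Assumes the same authenticated `speech_to_text` client; `en-US` is just an
# example of the five-character language identifier the parameter expects.
acoustic_models = speech_to_text.list_acoustic_models(language='en-US').get_result()
for model in acoustic_models['customizations']:
    print(model['customization_id'], model['status'])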
+ :param str customization_id: The customization ID (GUID) of the custom + acoustic model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `AcousticModel` object """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'get_acoustic_model') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_acoustic_model', + ) headers.update(sdk_headers) - url = '/v1/acoustic_customizations/{0}'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) - return response - - def list_acoustic_models(self, language=None, **kwargs): - """ - List custom acoustic models. - - Lists information about all custom acoustic models that are owned by an instance - of the service. Use the `language` parameter to see all custom acoustic models for - the specified language. Omit the parameter to see all custom acoustic models for - all languages. You must use credentials for the instance of the service that owns - a model to list information about it. - **See also:** [Listing custom acoustic - models](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-models.html#listModels-acoustic). - - :param str language: The identifier of the language for which custom language or - custom acoustic models are to be returned (for example, `en-US`). Omit the - parameter to see all custom language or custom acoustic models that are owned by - the requesting credentials. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'list_acoustic_models') - headers.update(sdk_headers) - - params = {'language': language} - - url = '/v1/acoustic_customizations' - response = self.request( + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/acoustic_customizations/{customization_id}'.format( + **path_param_dict) + request = self.prepare_request( method='GET', url=url, headers=headers, - params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def reset_acoustic_model(self, customization_id, **kwargs): + def delete_acoustic_model( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: """ - Reset a custom acoustic model. + Delete a custom acoustic model. - Resets a custom acoustic model by removing all audio resources from the model. - Resetting a custom acoustic model initializes the model to its state when it was - first created. Metadata such as the name and language of the model are preserved, - but the model's audio resources are removed and must be re-created. 
You must use - credentials for the instance of the service that owns a model to reset it. - **See also:** [Resetting a custom acoustic - model](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-models.html#resetModel-acoustic). + Deletes an existing custom acoustic model. The custom model cannot be deleted if + another request, such as adding an audio resource to the model, is currently being + processed. You must use credentials for the instance of the service that owns a + model to delete it. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for large speech models and + next-generation models. + **See also:** [Deleting a custom acoustic + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAcousticModels#deleteModel-acoustic). - :param str customization_id: The customization ID (GUID) of the custom acoustic - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. + :param str customization_id: The customization ID (GUID) of the custom + acoustic model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_acoustic_model', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'reset_acoustic_model') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/acoustic_customizations/{customization_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + ) - url = '/v1/acoustic_customizations/{0}/reset'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='POST', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def train_acoustic_model(self, - customization_id, - custom_language_model_id=None, - **kwargs): + def train_acoustic_model( + self, + customization_id: str, + *, + custom_language_model_id: Optional[str] = None, + strict: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ Train a custom acoustic model. @@ -2323,163 +3612,348 @@ def train_acoustic_model(self, data. The custom acoustic model does not reflect its changed data until you train it. You must use credentials for the instance of the service that owns a model to train it. - The training method is asynchronous. It can take on the order of minutes or hours - to complete depending on the total amount of audio data on which the custom - acoustic model is being trained and the current load on the service. Typically, - training a custom acoustic model takes approximately two to four times the length - of its audio data. 
The range of time depends on the model being trained and the - nature of the audio, such as whether the audio is clean or noisy. The method - returns an HTTP 200 response code to indicate that the training process has begun. - You can monitor the status of the training by using the **Get a custom acoustic - model** method to poll the model's status. Use a loop to check the status once a - minute. The method returns an `AcousticModel` object that includes `status` and - `progress` fields. A status of `available` indicates that the custom model is - trained and ready to use. The service cannot accept subsequent training requests, - or requests to add new audio resources, until the existing request completes. + The training method is asynchronous. Training time depends on the cumulative + amount of audio data that the custom acoustic model contains and the current load + on the service. When you train or retrain a model, the service uses all of the + model's audio data in the training. Training a custom acoustic model takes + approximately as long as the length of its cumulative audio data. For example, it + takes approximately 2 hours to train a model that contains a total of 2 hours of + audio. The method returns an HTTP 200 response code to indicate that the training + process has begun. + You can monitor the status of the training by using the [Get a custom acoustic + model](#getacousticmodel) method to poll the model's status. Use a loop to check + the status once a minute. The method returns an `AcousticModel` object that + includes `status` and `progress` fields. A status of `available` indicates that + the custom model is trained and ready to use. The service cannot train a model + while it is handling another request for the model. The service cannot accept + subsequent training requests, or requests to add new audio resources, until the + existing training request completes. You can use the optional `custom_language_model_id` parameter to specify the GUID of a separately created custom language model that is to be used during training. Train with a custom language model if you have verbatim transcriptions of the audio files that you have added to the custom model or you have either corpora (text files) or a list of words that are relevant to the contents of the audio - files. Both of the custom models must be based on the same version of the same - base model for training to succeed. - Training can fail to start for the following reasons: + files. For training to succeed, both of the custom models must be based on the + same version of the same base model, and the custom language model must be fully + trained and available. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for large speech models and + next-generation models. + **See also:** + * [Train the custom acoustic + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acoustic#trainModel-acoustic) + * [Using custom acoustic and custom language models + together](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-useBoth#useBoth) + ### Training failures + Training can fail to start for the following reasons: * The service is currently handling another request for the custom model, such as another training request or a request to add audio resources to the model. - * The custom model contains less than 10 minutes or more than 100 hours of audio - data. - * One or more of the custom model's audio resources is invalid. 
+ * The custom model contains less than 10 minutes of audio that includes speech,
+ not silence.
+ * The custom model contains more than 50 hours of audio (for IBM Cloud) or more
+ than 200 hours of audio (for IBM Cloud Pak for Data). **Note:** For IBM Cloud, the
+ maximum hours of audio for a custom acoustic model was reduced from 200 to 50
+ hours in August and September 2022. For more information, see [Maximum hours of
+ audio](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audioResources#audioMaximum).
+ * You passed a custom language model with the `custom_language_model_id` query
+ parameter that is not in the available state. A custom language model must be
+ fully trained and available to be used to train a custom acoustic model.
 * You passed an incompatible custom language model with the
 `custom_language_model_id` query parameter. Both custom models must be based on
 the same version of the same base model.
- **See also:** [Train the custom acoustic
- model](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-create.html#trainModel-acoustic).
-
- :param str customization_id: The customization ID (GUID) of the custom acoustic
- model that is to be used for the request. You must make the request with
- credentials for the instance of the service that owns the custom model.
- :param str custom_language_model_id: The customization ID (GUID) of a custom
- language model that is to be used during training of the custom acoustic model.
- Specify a custom language model that has been trained with verbatim transcriptions
- of the audio resources or that contains words that are relevant to the contents of
- the audio resources. The custom language model must be based on the same version
- of the same base model as the custom acoustic model. The credentials specified
- with the request must own both custom models.
+ * The custom model contains one or more invalid audio resources. You can correct
+ the invalid audio resources or set the `strict` parameter to `false` to exclude
+ the invalid resources from the training. The model must contain at least one valid
+ resource for training to succeed.
+
+ :param str customization_id: The customization ID (GUID) of the custom
+ acoustic model that is to be used for the request. You must make the
+ request with credentials for the instance of the service that owns the
+ custom model.
+ :param str custom_language_model_id: (optional) The customization ID (GUID)
+ of a custom language model that is to be used during training of the custom
+ acoustic model. Specify a custom language model that has been trained with
+ verbatim transcriptions of the audio resources or that contains words that
+ are relevant to the contents of the audio resources. The custom language
+ model must be based on the same version of the same base model as the
+ custom acoustic model, and the custom language model must be fully trained
+ and available. The credentials specified with the request must own both
+ custom models.
+ :param bool strict: (optional) If `false`, allows training of the custom
+ acoustic model to proceed as long as the model contains at least one valid
+ audio resource. The method returns an array of `TrainingWarning` objects
+ that lists any invalid resources. By default (`true`), training of a custom
+ acoustic model fails (status code 400) if the model contains one or more
+ invalid audio resources.
:param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `TrainingResponse` object """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'train_acoustic_model') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='train_acoustic_model', + ) headers.update(sdk_headers) - params = {'custom_language_model_id': custom_language_model_id} + params = { + 'custom_language_model_id': custom_language_model_id, + 'strict': strict, + } - url = '/v1/acoustic_customizations/{0}/train'.format( - *self._encode_path_vars(customization_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/acoustic_customizations/{customization_id}/train'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def upgrade_acoustic_model(self, - customization_id, - custom_language_model_id=None, - force=None, - **kwargs): + def reset_acoustic_model( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: """ - Upgrade a custom acoustic model. + Reset a custom acoustic model. - Initiates the upgrade of a custom acoustic model to the latest version of its base - language model. The upgrade method is asynchronous. It can take on the order of - minutes or hours to complete depending on the amount of data in the custom model - and the current load on the service; typically, upgrade takes approximately twice - the length of the total audio contained in the custom model. A custom model must + Resets a custom acoustic model by removing all audio resources from the model. + Resetting a custom acoustic model initializes the model to its state when it was + first created. Metadata such as the name and language of the model are preserved, + but the model's audio resources are removed and must be re-created. The service + cannot reset a model while it is handling another request for the model. The + service cannot accept subsequent requests for the model until the existing reset + request completes. You must use credentials for the instance of the service that + owns a model to reset it. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for large speech models and + next-generation models. + **See also:** [Resetting a custom acoustic + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAcousticModels#resetModel-acoustic). + + :param str customization_id: The customization ID (GUID) of the custom + acoustic model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. 
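# A minimal sketch of training a custom acoustic model and polling its status with
# get_acoustic_model, as the docstring above recommends (checking once a minute).
# Assumes the authenticated `speech_to_text` client and a placeholder GUID.
import time

customization_id = '{acoustic_customization_id}'
speech_to_text.train_acoustic_model(customization_id=customization_id)

while True:
    model = speech_to_text.get_acoustic_model(customization_id).get_result()
    if model['status'] in ('available', 'failed'):
        break
    time.sleep(60)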
+ :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if not customization_id: + raise ValueError('customization_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='reset_acoustic_model', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/acoustic_customizations/{customization_id}/reset'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) + return response + + def upgrade_acoustic_model( + self, + customization_id: str, + *, + custom_language_model_id: Optional[str] = None, + force: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: + """ + Upgrade a custom acoustic model. + + Initiates the upgrade of a custom acoustic model to the latest version of its base + language model. The upgrade method is asynchronous. It can take on the order of + minutes or hours to complete depending on the amount of data in the custom model + and the current load on the service; typically, upgrade takes approximately twice + the length of the total audio contained in the custom model. A custom model must be in the `ready` or `available` state to be upgraded. You must use credentials for the instance of the service that owns a model to upgrade it. The method returns an HTTP 200 response code to indicate that the upgrade process has begun successfully. You can monitor the status of the upgrade by using the - **Get a custom acoustic model** method to poll the model's status. The method - returns an `AcousticModel` object that includes `status` and `progress` fields. - Use a loop to check the status once a minute. While it is being upgraded, the - custom model has the status `upgrading`. When the upgrade is complete, the model - resumes the status that it had prior to upgrade. The service cannot accept - subsequent requests for the model until the upgrade completes. + [Get a custom acoustic model](#getacousticmodel) method to poll the model's + status. The method returns an `AcousticModel` object that includes `status` and + `progress` fields. Use a loop to check the status once a minute. + While it is being upgraded, the custom model has the status `upgrading`. When the + upgrade is complete, the model resumes the status that it had prior to upgrade. + The service cannot upgrade a model while it is handling another request for the + model. The service cannot accept subsequent requests for the model until the + existing upgrade request completes. If the custom acoustic model was trained with a separately created custom language model, you must use the `custom_language_model_id` parameter to specify the GUID of that custom language model. The custom language model must be upgraded before the custom acoustic model can be upgraded. Omit the parameter if the custom acoustic model was not trained with a custom language model. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. 
It is not supported for large speech models and + next-generation models. **See also:** [Upgrading a custom acoustic - model](https://cloud.ibm.com/docs/services/speech-to-text/custom-upgrade.html#upgradeAcoustic). - - :param str customization_id: The customization ID (GUID) of the custom acoustic - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str custom_language_model_id: If the custom acoustic model was trained with - a custom language model, the customization ID (GUID) of that custom language - model. The custom language model must be upgraded before the custom acoustic model - can be upgraded. The credentials specified with the request must own both custom - models. - :param bool force: If `true`, forces the upgrade of a custom acoustic model for - which no input data has been modified since it was last trained. Use this - parameter only to force the upgrade of a custom acoustic model that is trained - with a custom language model, and only if you receive a 400 response code and the - message `No input data modified since last training`. See [Upgrading a custom - acoustic - model](https://cloud.ibm.com/docs/services/speech-to-text/custom-upgrade.html#upgradeAcoustic). + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-acoustic). + + :param str customization_id: The customization ID (GUID) of the custom + acoustic model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str custom_language_model_id: (optional) If the custom acoustic + model was trained with a custom language model, the customization ID (GUID) + of that custom language model. The custom language model must be upgraded + before the custom acoustic model can be upgraded. The custom language model + must be fully trained and available. The credentials specified with the + request must own both custom models. + :param bool force: (optional) If `true`, forces the upgrade of a custom + acoustic model for which no input data has been modified since it was last + trained. Use this parameter only to force the upgrade of a custom acoustic + model that is trained with a custom language model, and only if you receive + a 400 response code and the message `No input data modified since last + training`. See [Upgrading a custom acoustic + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-acoustic). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'upgrade_acoustic_model') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='upgrade_acoustic_model', + ) headers.update(sdk_headers) params = { 'custom_language_model_id': custom_language_model_id, - 'force': force + 'force': force, } - url = '/v1/acoustic_customizations/{0}/upgrade_model'.format( - *self._encode_path_vars(customization_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/acoustic_customizations/{customization_id}/upgrade_model'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response ######################### # Custom audio resources ######################### - def add_audio(self, - customization_id, - audio_name, - audio_resource, - contained_content_type=None, - allow_overwrite=None, - content_type=None, - **kwargs): + def list_audio( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: + """ + List audio resources. + + Lists information about all audio resources from a custom acoustic model. The + information includes the name of the resource and information about its audio + data, such as its duration. It also includes the status of the audio resource, + which is important for checking the service's analysis of the resource in response + to a request to add it to the custom acoustic model. You must use credentials for + the instance of the service that owns a model to list its audio resources. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for large speech models and + next-generation models. + **See also:** [Listing audio resources for a custom acoustic + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAudio#listAudio). + + :param str customization_id: The customization ID (GUID) of the custom + acoustic model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `AudioResources` object + """ + + if not customization_id: + raise ValueError('customization_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_audio', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/acoustic_customizations/{customization_id}/audio'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) + return response + + def add_audio( + self, + customization_id: str, + audio_name: str, + audio_resource: BinaryIO, + *, + content_type: Optional[str] = None, + contained_content_type: Optional[str] = None, + allow_overwrite: Optional[bool] = None, + **kwargs, + ) -> DetailedResponse: """ Add an audio resource. @@ -2487,35 +3961,42 @@ def add_audio(self, the acoustic characteristics of the audio that you plan to transcribe. You must use credentials for the instance of the service that owns a model to add an audio resource to it. Adding audio data does not affect the custom acoustic model until - you train the model for the new data by using the **Train a custom acoustic - model** method. + you train the model for the new data by using the [Train a custom acoustic + model](#trainacousticmodel) method. You can add individual audio files or an archive file that contains multiple audio files. Adding multiple audio files via a single archive file is significantly more efficient than adding each file individually. You can add audio resources in any format that the service supports for speech recognition. You can use this method to add any number of audio resources to a custom model by - calling the method once for each audio or archive file. But the addition of one - audio resource must be fully complete before you can add another. You must add a - minimum of 10 minutes and a maximum of 100 hours of audio that includes speech, - not just silence, to a custom acoustic model before you can train it. No audio - resource, audio- or archive-type, can be larger than 100 MB. To add an audio - resource that has the same name as an existing audio resource, set the - `allow_overwrite` parameter to `true`; otherwise, the request fails. - The method is asynchronous. It can take several seconds to complete depending on - the duration of the audio and, in the case of an archive file, the total number of - audio files being processed. The service returns a 201 response code if the audio - is valid. It then asynchronously analyzes the contents of the audio file or files - and automatically extracts information about the audio such as its length, - sampling rate, and encoding. You cannot submit requests to add additional audio - resources to a custom acoustic model, or to train the model, until the service's - analysis of all audio files for the current request completes. - To determine the status of the service's analysis of the audio, use the **Get an - audio resource** method to poll the status of the audio. The method accepts the - customization ID of the custom model and the name of the audio resource, and it - returns the status of the resource. 
Use a loop to check the status of the audio - every few seconds until it becomes `ok`. + calling the method once for each audio or archive file. You can add multiple + different audio resources at the same time. You must add a minimum of 10 minutes + of audio that includes speech, not just silence, to a custom acoustic model before + you can train it. No audio resource, audio- or archive-type, can be larger than + 100 MB. To add an audio resource that has the same name as an existing audio + resource, set the `allow_overwrite` parameter to `true`; otherwise, the request + fails. A custom model can contain no more than 50 hours of audio (for IBM Cloud) + or 200 hours of audio (for IBM Cloud Pak for Data). **Note:** For IBM Cloud, the + maximum hours of audio for a custom acoustic model was reduced from 200 to 50 + hours in August and September 2022. For more information, see [Maximum hours of + audio](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audioResources#audioMaximum). + The method is asynchronous. It can take several seconds or minutes to complete + depending on the duration of the audio and, in the case of an archive file, the + total number of audio files being processed. The service returns a 201 response + code if the audio is valid. It then asynchronously analyzes the contents of the + audio file or files and automatically extracts information about the audio such as + its length, sampling rate, and encoding. You cannot submit requests to train or + upgrade the model until the service's analysis of all audio resources for current + requests completes. + To determine the status of the service's analysis of the audio, use the [Get an + audio resource](#getaudio) method to poll the status of the audio. The method + accepts the customization ID of the custom model and the name of the audio + resource, and it returns the status of the resource. Use a loop to check the + status of the audio every few seconds until it becomes `ok`. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for large speech models and + next-generation models. **See also:** [Add audio to the custom acoustic - model](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-create.html#addAudio). + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acoustic#addAudio). ### Content types for audio-type resources You can add an individual audio file in any format that the service supports for speech recognition. For an audio-type resource, use the `Content-Type` parameter @@ -2543,8 +4024,8 @@ def add_audio(self, minimum required rate, the service down-samples the audio to the appropriate rate. If the sampling rate of the audio is lower than the minimum required rate, the service labels the audio file as `invalid`. - **See also:** [Audio - formats](https://cloud.ibm.com/docs/services/speech-to-text/audio-formats.html). + **See also:** [Supported audio + formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats). ### Content types for archive-type resources You can add an archive file (**.zip** or **.tar.gz** file) that contains audio files in any format that the service supports for speech recognition. For an @@ -2565,218 +4046,252 @@ def add_audio(self, have the same format. Do not use the `Contained-Content-Type` header when adding an audio-type resource. 
### Naming restrictions for embedded audio files - The name of an audio file that is embedded within an archive-type resource must - meet the following restrictions: - * Include a maximum of 128 characters in the file name; this includes the file - extension. - * Do not include spaces, slashes, or backslashes in the file name. - * Do not use the name of an audio file that has already been added to the custom - model as part of an archive-type resource. - - :param str customization_id: The customization ID (GUID) of the custom acoustic - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str audio_name: The name of the new audio resource for the custom acoustic - model. Use a localized name that matches the language of the custom model and - reflects the contents of the resource. - * Include a maximum of 128 characters in the name. - * Do not include spaces, slashes, or backslashes in the name. - * Do not use the name of an audio resource that has already been added to the - custom model. - :param file audio_resource: The audio resource that is to be added to the custom - acoustic model, an individual audio file or an archive file. - :param str contained_content_type: **For an archive-type resource,** specify the - format of the audio files that are contained in the archive file if they are of - type `audio/alaw`, `audio/basic`, `audio/l16`, or `audio/mulaw`. Include the - `rate`, `channels`, and `endianness` parameters where necessary. In this case, all - audio files that are contained in the archive file must be of the indicated type. - For all other audio formats, you can omit the header. In this case, the audio - files can be of multiple types as long as they are not of the types listed in the - previous paragraph. - The parameter accepts all of the audio formats that are supported for use with - speech recognition. For more information, see **Content types for audio-type - resources** in the method description. - **For an audio-type resource,** omit the header. - :param bool allow_overwrite: If `true`, the specified audio resource overwrites an - existing audio resource with the same name. If `false`, the request fails if an - audio resource with the same name already exists. The parameter has no effect if - an audio resource with the same name does not already exist. - :param str content_type: For an audio-type resource, the format (MIME type) of the - audio. For more information, see **Content types for audio-type resources** in the - method description. - For an archive-type resource, the media type of the archive file. For more - information, see **Content types for archive-type resources** in the method - description. + The name of an audio file that is contained in an archive-type resource can + include a maximum of 128 characters. This includes the file extension and all + elements of the name (for example, slashes). + + :param str customization_id: The customization ID (GUID) of the custom + acoustic model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str audio_name: The name of the new audio resource for the custom + acoustic model. Use a localized name that matches the language of the + custom model and reflects the contents of the resource. + * Include a maximum of 128 characters in the name. + * Do not use characters that need to be URL-encoded. 
For example, do not + use spaces, slashes, backslashes, colons, ampersands, double quotes, plus + signs, equals signs, question marks, and so on in the name. (The service + does not prevent the use of these characters. But because they must be + URL-encoded wherever used, their use is strongly discouraged.) + * Do not use the name of an audio resource that has already been added to + the custom model. + :param BinaryIO audio_resource: The audio resource that is to be added to + the custom acoustic model, an individual audio file or an archive file. + With the `curl` command, use the `--data-binary` option to upload the file + for the request. + :param str content_type: (optional) For an audio-type resource, the format + (MIME type) of the audio. For more information, see **Content types for + audio-type resources** in the method description. + For an archive-type resource, the media type of the archive file. For more + information, see **Content types for archive-type resources** in the method + description. + :param str contained_content_type: (optional) _For an archive-type + resource_, specify the format of the audio files that are contained in the + archive file if they are of type `audio/alaw`, `audio/basic`, `audio/l16`, + or `audio/mulaw`. Include the `rate`, `channels`, and `endianness` + parameters where necessary. In this case, all audio files that are + contained in the archive file must be of the indicated type. + For all other audio formats, you can omit the header. In this case, the + audio files can be of multiple types as long as they are not of the types + listed in the previous paragraph. + The parameter accepts all of the audio formats that are supported for use + with speech recognition. For more information, see **Content types for + audio-type resources** in the method description. + _For an audio-type resource_, omit the header. + :param bool allow_overwrite: (optional) If `true`, the specified audio + resource overwrites an existing audio resource with the same name. If + `false`, the request fails if an audio resource with the same name already + exists. The parameter has no effect if an audio resource with the same name + does not already exist. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if audio_name is None: + if not audio_name: raise ValueError('audio_name must be provided') if audio_resource is None: raise ValueError('audio_resource must be provided') - headers = { + 'Content-Type': content_type, 'Contained-Content-Type': contained_content_type, - 'Content-Type': content_type } - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'add_audio') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='add_audio', + ) headers.update(sdk_headers) - params = {'allow_overwrite': allow_overwrite} + params = { + 'allow_overwrite': allow_overwrite, + } data = audio_resource - url = '/v1/acoustic_customizations/{0}/audio/{1}'.format( - *self._encode_path_vars(customization_id, audio_name)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'audio_name'] + path_param_values = self.encode_path_vars(customization_id, audio_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/acoustic_customizations/{customization_id}/audio/{audio_name}'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, data=data, - accept_json=True) - return response - - def delete_audio(self, customization_id, audio_name, **kwargs): - """ - Delete an audio resource. - - Deletes an existing audio resource from a custom acoustic model. Deleting an - archive-type audio resource removes the entire archive of files; the current - interface does not allow deletion of individual files from an archive resource. - Removing an audio resource does not affect the custom model until you train the - model on its updated data by using the **Train a custom acoustic model** method. - You must use credentials for the instance of the service that owns a model to - delete its audio resources. - **See also:** [Deleting an audio resource from a custom acoustic - model](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-audio.html#deleteAudio). - - :param str customization_id: The customization ID (GUID) of the custom acoustic - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str audio_name: The name of the audio resource for the custom acoustic - model. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
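# A sketch (assumed values) of the archive-type variant described in the add_audio
# parameters above: upload a .zip of audio/l16 files, declare their format with
# contained_content_type, and allow an existing resource of the same name to be replaced.
# 'audio_batch.zip' and the customization ID are placeholders; 'stt' is the authenticated
# SpeechToTextV1 client from the earlier sketch.
with open('audio_batch.zip', 'rb') as archive_file:
    stt.add_audio(
        customization_id='{customization_id}',
        audio_name='audio_batch',
        audio_resource=archive_file,
        content_type='application/zip',
        contained_content_type='audio/l16;rate=16000',
        allow_overwrite=True,
    )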
- :rtype: DetailedResponse - """ - - if customization_id is None: - raise ValueError('customization_id must be provided') - if audio_name is None: - raise ValueError('audio_name must be provided') + ) - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'delete_audio') - headers.update(sdk_headers) - - url = '/v1/acoustic_customizations/{0}/audio/{1}'.format( - *self._encode_path_vars(customization_id, audio_name)) - response = self.request( - method='DELETE', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def get_audio(self, customization_id, audio_name, **kwargs): + def get_audio( + self, + customization_id: str, + audio_name: str, + **kwargs, + ) -> DetailedResponse: """ Get an audio resource. Gets information about an audio resource from a custom acoustic model. The method returns an `AudioListing` object whose fields depend on the type of audio resource that you specify with the method's `audio_name` parameter: - * **For an audio-type resource,** the object's fields match those of an + * _For an audio-type resource_, the object's fields match those of an `AudioResource` object: `duration`, `name`, `details`, and `status`. - * **For an archive-type resource,** the object includes a `container` field whose + * _For an archive-type resource_, the object includes a `container` field whose fields match those of an `AudioResource` object. It also includes an `audio` field, which contains an array of `AudioResource` objects that provides information about the audio files that are contained in the archive. The information includes the status of the specified audio resource. The status is important for checking the service's analysis of a resource that you add to the custom model. - * For an audio-type resource, the `status` field is located in the `AudioListing` - object. - * For an archive-type resource, the `status` field is located in the + * _For an audio-type resource_, the `status` field is located in the + `AudioListing` object. + * _For an archive-type resource_, the `status` field is located in the `AudioResource` object that is returned in the `container` field. You must use credentials for the instance of the service that owns a model to list its audio resources. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for large speech models and + next-generation models. **See also:** [Listing audio resources for a custom acoustic - model](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-audio.html#listAudio). - - :param str customization_id: The customization ID (GUID) of the custom acoustic - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. - :param str audio_name: The name of the audio resource for the custom acoustic - model. + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAudio#listAudio). + + :param str customization_id: The customization ID (GUID) of the custom + acoustic model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str audio_name: The name of the audio resource for the custom + acoustic model. 
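# A sketch of reading the AudioListing that get_audio returns, following the audio-type vs.
# archive-type distinction described above: the status sits at the top level for an
# audio-type resource and under 'container' for an archive-type resource. The client and
# identifiers are the placeholder values from the earlier sketches.
listing = stt.get_audio('{customization_id}', 'audio_batch').get_result()
if 'container' in listing:
    # Archive-type resource: overall status is on the container object.
    print('archive status:', listing['container']['status'])
    for item in listing.get('audio', []):
        print(item['name'], item['status'])
else:
    # Audio-type resource: status is on the listing itself.
    print('audio status:', listing['status'])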
:param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `AudioListing` object """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if audio_name is None: + if not audio_name: raise ValueError('audio_name must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_audio', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'get_audio') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'audio_name'] + path_param_values = self.encode_path_vars(customization_id, audio_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/acoustic_customizations/{customization_id}/audio/{audio_name}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) - url = '/v1/acoustic_customizations/{0}/audio/{1}'.format( - *self._encode_path_vars(customization_id, audio_name)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def list_audio(self, customization_id, **kwargs): + def delete_audio( + self, + customization_id: str, + audio_name: str, + **kwargs, + ) -> DetailedResponse: """ - List audio resources. - - Lists information about all audio resources from a custom acoustic model. The - information includes the name of the resource and information about its audio - data, such as its duration. It also includes the status of the audio resource, - which is important for checking the service's analysis of the resource in response - to a request to add it to the custom acoustic model. You must use credentials for - the instance of the service that owns a model to list its audio resources. - **See also:** [Listing audio resources for a custom acoustic - model](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-audio.html#listAudio). + Delete an audio resource. - :param str customization_id: The customization ID (GUID) of the custom acoustic - model that is to be used for the request. You must make the request with - credentials for the instance of the service that owns the custom model. + Deletes an existing audio resource from a custom acoustic model. Deleting an + archive-type audio resource removes the entire archive of files. The service does + not allow deletion of individual files from an archive resource. + Removing an audio resource does not affect the custom model until you train the + model on its updated data by using the [Train a custom acoustic + model](#trainacousticmodel) method. You can delete an existing audio resource from + a model while a different resource is being added to the model. You must use + credentials for the instance of the service that owns a model to delete its audio + resources. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for large speech models and + next-generation models. 
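# A sketch of the delete-then-retrain flow noted in the delete_audio description: removing a
# resource takes effect only after the model is retrained. 'stt', the customization ID, and
# the resource name are the placeholder values used in the earlier sketches;
# train_acoustic_model corresponds to the "Train a custom acoustic model" method referenced
# above.
stt.delete_audio('{customization_id}', 'audio_batch')
stt.train_acoustic_model('{customization_id}')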
+ **See also:** [Deleting an audio resource from a custom acoustic + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAudio#deleteAudio). + + :param str customization_id: The customization ID (GUID) of the custom + acoustic model that is to be used for the request. You must make the + request with credentials for the instance of the service that owns the + custom model. + :param str audio_name: The name of the audio resource for the custom + acoustic model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - + if not audio_name: + raise ValueError('audio_name must be provided') headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_audio', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'list_audio') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'audio_name'] + path_param_values = self.encode_path_vars(customization_id, audio_name) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/acoustic_customizations/{customization_id}/audio/{audio_name}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + ) - url = '/v1/acoustic_customizations/{0}/audio'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response ######################### # User data ######################### - def delete_user_data(self, customer_id, **kwargs): + def delete_user_data( + self, + customer_id: str, + **kwargs, + ) -> DetailedResponse: """ Delete labeled data. @@ -2784,251 +4299,1048 @@ def delete_user_data(self, customer_id, **kwargs): deletes all data for the customer ID, regardless of the method by which the information was added. The method has no effect if no data is associated with the customer ID. You must issue the request with credentials for the same instance of - the service that was used to associate the customer ID with the data. - You associate a customer ID with data by passing the `X-Watson-Metadata` header - with a request that passes the data. + the service that was used to associate the customer ID with the data. You + associate a customer ID with data by passing the `X-Watson-Metadata` header with a + request that passes the data. + **Note:** If you delete an instance of the service from the service console, all + data associated with that service instance is automatically deleted. This includes + all custom language models, corpora, grammars, and words; all custom acoustic + models and audio resources; all registered endpoints for the asynchronous HTTP + interface; and all data related to speech recognition requests. **See also:** [Information - security](https://cloud.ibm.com/docs/services/speech-to-text/information-security.html). + security](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-information-security#information-security). - :param str customer_id: The customer ID for which all data is to be deleted. 
+ :param str customer_id: The customer ID for which all data is to be + deleted. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customer_id is None: + if not customer_id: raise ValueError('customer_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('speech_to_text', 'V1', - 'delete_user_data') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_user_data', + ) headers.update(sdk_headers) - params = {'customer_id': customer_id} + params = { + 'customer_id': customer_id, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] url = '/v1/user_data' - response = self.request( + request = self.prepare_request( method='DELETE', url=url, headers=headers, params=params, - accept_json=False) - return response - - -############################################################################## -# Models -############################################################################## + ) + response = self.send(request, **kwargs) + return response -class AcousticModel(object): - """ - AcousticModel. - - :attr str customization_id: The customization ID (GUID) of the custom acoustic model. - The **Create a custom acoustic model** method returns only this field of the object; - it does not return the other fields. - :attr str created: (optional) The date and time in Coordinated Universal Time (UTC) at - which the custom acoustic model was created. The value is provided in full ISO 8601 - format (`YYYY-MM-DDThh:mm:ss.sTZD`). - :attr str language: (optional) The language identifier of the custom acoustic model - (for example, `en-US`). - :attr list[str] versions: (optional) A list of the available versions of the custom - acoustic model. Each element of the array indicates a version of the base model with - which the custom model can be used. Multiple versions exist only if the custom model - has been upgraded; otherwise, only a single version is shown. - :attr str owner: (optional) The GUID of the credentials for the instance of the - service that owns the custom acoustic model. - :attr str name: (optional) The name of the custom acoustic model. - :attr str description: (optional) The description of the custom acoustic model. - :attr str base_model_name: (optional) The name of the language model for which the - custom acoustic model was created. - :attr str status: (optional) The current status of the custom acoustic model: - * `pending`: The model was created but is waiting either for training data to be added - or for the service to finish analyzing added data. - * `ready`: The model contains data and is ready to be trained. - * `training`: The model is currently being trained. - * `available`: The model is trained and ready to use. - * `upgrading`: The model is currently being upgraded. - * `failed`: Training of the model failed. - :attr int progress: (optional) A percentage that indicates the progress of the custom - acoustic model's current training. A value of `100` means that the model is fully - trained. **Note:** The `progress` field does not currently reflect the progress of the - training. The field changes from `0` to `100` when training is complete. 
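# A sketch of the customer-ID flow that delete_user_data describes: label a request with the
# X-Watson-Metadata header, then delete everything associated with that customer ID. The
# 'stt' client, the audio file, and the customer ID value are placeholders.
with open('call.wav', 'rb') as audio_file:
    stt.recognize(
        audio=audio_file,
        content_type='audio/wav',
        headers={'X-Watson-Metadata': 'customer_id=my_customer_ID'},
    )

stt.delete_user_data(customer_id='my_customer_ID')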
- :attr str warnings: (optional) If the request included unknown parameters, the - following message: `Unexpected query parameter(s) ['parameters'] detected`, where - `parameters` is a list that includes a quoted string for each unknown parameter. - """ + ######################### + # Language identification + ######################### - def __init__(self, - customization_id, - created=None, - language=None, - versions=None, - owner=None, - name=None, - description=None, - base_model_name=None, - status=None, - progress=None, - warnings=None): + def detect_language( + self, + lid_confidence: float, + audio: BinaryIO, + *, + content_type: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Spoken language identification. + + Detects the spoken language in audio streams. The endpoint is + `/v1/detect_language` and user can optionally include `lid_confidence` parameter + to set a custom confidence threshold for detection. The model continuously + processes incoming audio and returns the identified language when it reaches a + confidence level higher than the specified threshold (0.99 by default). See + [Spoken language + identification](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speech-language-identification). + + :param float lid_confidence: Set a custom confidence threshold for + detection. + :param BinaryIO audio: The audio to transcribe. + :param str content_type: (optional) The type of the input. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `LanguageDetectionResults` object """ - Initialize a AcousticModel object. - :param str customization_id: The customization ID (GUID) of the custom acoustic - model. The **Create a custom acoustic model** method returns only this field of - the object; it does not return the other fields. - :param str created: (optional) The date and time in Coordinated Universal Time - (UTC) at which the custom acoustic model was created. The value is provided in - full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). - :param str language: (optional) The language identifier of the custom acoustic - model (for example, `en-US`). - :param list[str] versions: (optional) A list of the available versions of the - custom acoustic model. Each element of the array indicates a version of the base - model with which the custom model can be used. Multiple versions exist only if the - custom model has been upgraded; otherwise, only a single version is shown. - :param str owner: (optional) The GUID of the credentials for the instance of the - service that owns the custom acoustic model. - :param str name: (optional) The name of the custom acoustic model. - :param str description: (optional) The description of the custom acoustic model. - :param str base_model_name: (optional) The name of the language model for which - the custom acoustic model was created. - :param str status: (optional) The current status of the custom acoustic model: - * `pending`: The model was created but is waiting either for training data to be - added or for the service to finish analyzing added data. - * `ready`: The model contains data and is ready to be trained. - * `training`: The model is currently being trained. - * `available`: The model is trained and ready to use. - * `upgrading`: The model is currently being upgraded. - * `failed`: Training of the model failed. 
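# A minimal sketch of calling the detect_language method added above, using the signature
# from this diff (lid_confidence, audio, optional content_type). The threshold, file name,
# and 'stt' client are placeholder values.
with open('unknown_language.wav', 'rb') as audio_file:
    detection = stt.detect_language(
        lid_confidence=0.9,
        audio=audio_file,
        content_type='audio/wav',
    ).get_result()
print(detection)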
- :param int progress: (optional) A percentage that indicates the progress of the - custom acoustic model's current training. A value of `100` means that the model is - fully trained. **Note:** The `progress` field does not currently reflect the - progress of the training. The field changes from `0` to `100` when training is - complete. - :param str warnings: (optional) If the request included unknown parameters, the - following message: `Unexpected query parameter(s) ['parameters'] detected`, where - `parameters` is a list that includes a quoted string for each unknown parameter. - """ - self.customization_id = customization_id - self.created = created - self.language = language - self.versions = versions - self.owner = owner - self.name = name - self.description = description - self.base_model_name = base_model_name - self.status = status - self.progress = progress - self.warnings = warnings + if lid_confidence is None: + raise ValueError('lid_confidence must be provided') + if audio is None: + raise ValueError('audio must be provided') + headers = { + 'Content-Type': content_type, + } + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='detect_language', + ) + headers.update(sdk_headers) - @classmethod - def _from_dict(cls, _dict): - """Initialize a AcousticModel object from a json dictionary.""" - args = {} - if 'customization_id' in _dict: - args['customization_id'] = _dict.get('customization_id') - else: - raise ValueError( - 'Required property \'customization_id\' not present in AcousticModel JSON' - ) - if 'created' in _dict: - args['created'] = _dict.get('created') - if 'language' in _dict: - args['language'] = _dict.get('language') - if 'versions' in _dict: - args['versions'] = _dict.get('versions') - if 'owner' in _dict: - args['owner'] = _dict.get('owner') - if 'name' in _dict: - args['name'] = _dict.get('name') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'base_model_name' in _dict: - args['base_model_name'] = _dict.get('base_model_name') - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'progress' in _dict: - args['progress'] = _dict.get('progress') - if 'warnings' in _dict: - args['warnings'] = _dict.get('warnings') - return cls(**args) + params = { + 'lid_confidence': lid_confidence, + } - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'customization_id') and self.customization_id is not None: - _dict['customization_id'] = self.customization_id - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = self.created - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language - if hasattr(self, 'versions') and self.versions is not None: - _dict['versions'] = self.versions - if hasattr(self, 'owner') and self.owner is not None: - _dict['owner'] = self.owner - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, - 'base_model_name') and self.base_model_name is not None: - _dict['base_model_name'] = self.base_model_name - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'progress') and self.progress is not None: - _dict['progress'] = self.progress - if hasattr(self, 'warnings') and self.warnings is not None: - _dict['warnings'] = 
self.warnings - return _dict + data = audio - def __str__(self): - """Return a `str` version of this AcousticModel object.""" - return json.dumps(self._to_dict(), indent=2) + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ + url = '/v1/detect_language' + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other + response = self.send(request, **kwargs) + return response -class AcousticModels(object): +class GetModelEnums: + """ + Enums for get_model parameters. """ - AcousticModels. - :attr list[AcousticModel] customizations: An array of `AcousticModel` objects that - provides information about each available custom acoustic model. The array is empty if - the requesting credentials own no custom acoustic models (if no language is specified) - or own no custom acoustic models for the specified language. + class ModelId(str, Enum): + """ + The identifier of the model in the form of its name from the output of the [List + models](#listmodels) method. + """ + + AR_MS_BROADBANDMODEL = 'ar-MS_BroadbandModel' + AR_MS_TELEPHONY = 'ar-MS_Telephony' + CS_CZ_TELEPHONY = 'cs-CZ_Telephony' + DE_DE = 'de-DE' + DE_DE_BROADBANDMODEL = 'de-DE_BroadbandModel' + DE_DE_MULTIMEDIA = 'de-DE_Multimedia' + DE_DE_NARROWBANDMODEL = 'de-DE_NarrowbandModel' + DE_DE_TELEPHONY = 'de-DE_Telephony' + EN_AU = 'en-AU' + EN_AU_BROADBANDMODEL = 'en-AU_BroadbandModel' + EN_AU_MULTIMEDIA = 'en-AU_Multimedia' + EN_AU_NARROWBANDMODEL = 'en-AU_NarrowbandModel' + EN_AU_TELEPHONY = 'en-AU_Telephony' + EN_GB = 'en-GB' + EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel' + EN_GB_MULTIMEDIA = 'en-GB_Multimedia' + EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel' + EN_GB_TELEPHONY = 'en-GB_Telephony' + EN_IN = 'en-IN' + EN_IN_TELEPHONY = 'en-IN_Telephony' + EN_US = 'en-US' + EN_US_BROADBANDMODEL = 'en-US_BroadbandModel' + EN_US_MULTIMEDIA = 'en-US_Multimedia' + EN_US_NARROWBANDMODEL = 'en-US_NarrowbandModel' + EN_US_SHORTFORM_NARROWBANDMODEL = 'en-US_ShortForm_NarrowbandModel' + EN_US_TELEPHONY = 'en-US_Telephony' + EN_WW_MEDICAL_TELEPHONY = 'en-WW_Medical_Telephony' + ES_AR = 'es-AR' + ES_AR_BROADBANDMODEL = 'es-AR_BroadbandModel' + ES_AR_NARROWBANDMODEL = 'es-AR_NarrowbandModel' + ES_CL = 'es-CL' + ES_CL_BROADBANDMODEL = 'es-CL_BroadbandModel' + ES_CL_NARROWBANDMODEL = 'es-CL_NarrowbandModel' + ES_CO = 'es-CO' + ES_CO_BROADBANDMODEL = 'es-CO_BroadbandModel' + ES_CO_NARROWBANDMODEL = 'es-CO_NarrowbandModel' + ES_ES = 'es-ES' + ES_ES_BROADBANDMODEL = 'es-ES_BroadbandModel' + ES_ES_NARROWBANDMODEL = 'es-ES_NarrowbandModel' + ES_ES_MULTIMEDIA = 'es-ES_Multimedia' + ES_ES_TELEPHONY = 'es-ES_Telephony' + ES_LA_TELEPHONY = 'es-LA_Telephony' + ES_MX = 'es-MX' + ES_MX_BROADBANDMODEL = 'es-MX_BroadbandModel' + ES_MX_NARROWBANDMODEL = 'es-MX_NarrowbandModel' + ES_PE = 'es-PE' + ES_PE_BROADBANDMODEL = 'es-PE_BroadbandModel' + ES_PE_NARROWBANDMODEL = 'es-PE_NarrowbandModel' + FR_CA = 'fr-CA' + FR_CA_BROADBANDMODEL = 'fr-CA_BroadbandModel' + FR_CA_MULTIMEDIA = 'fr-CA_Multimedia' + FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel' + FR_CA_TELEPHONY = 'fr-CA_Telephony' + FR_FR = 'fr-FR' + 
FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel' + FR_FR_MULTIMEDIA = 'fr-FR_Multimedia' + FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel' + FR_FR_TELEPHONY = 'fr-FR_Telephony' + HI_IN_TELEPHONY = 'hi-IN_Telephony' + IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel' + IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel' + IT_IT_MULTIMEDIA = 'it-IT_Multimedia' + IT_IT_TELEPHONY = 'it-IT_Telephony' + JA_JP = 'ja-JP' + JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel' + JA_JP_MULTIMEDIA = 'ja-JP_Multimedia' + JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel' + JA_JP_TELEPHONY = 'ja-JP_Telephony' + KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel' + KO_KR_MULTIMEDIA = 'ko-KR_Multimedia' + KO_KR_NARROWBANDMODEL = 'ko-KR_NarrowbandModel' + KO_KR_TELEPHONY = 'ko-KR_Telephony' + NL_BE_TELEPHONY = 'nl-BE_Telephony' + NL_NL_BROADBANDMODEL = 'nl-NL_BroadbandModel' + NL_NL_MULTIMEDIA = 'nl-NL_Multimedia' + NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel' + NL_NL_TELEPHONY = 'nl-NL_Telephony' + PT_BR = 'pt-BR' + PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel' + PT_BR_MULTIMEDIA = 'pt-BR_Multimedia' + PT_BR_NARROWBANDMODEL = 'pt-BR_NarrowbandModel' + PT_BR_TELEPHONY = 'pt-BR_Telephony' + SV_SE_TELEPHONY = 'sv-SE_Telephony' + ZH_CN_BROADBANDMODEL = 'zh-CN_BroadbandModel' + ZH_CN_NARROWBANDMODEL = 'zh-CN_NarrowbandModel' + ZH_CN_TELEPHONY = 'zh-CN_Telephony' + + +class RecognizeEnums: + """ + Enums for recognize parameters. """ - def __init__(self, customizations): - """ - Initialize a AcousticModels object. + class ContentType(str, Enum): + """ + The format (MIME type) of the audio. For more information about specifying an + audio format, see **Audio formats (content types)** in the method description. + """ + + APPLICATION_OCTET_STREAM = 'application/octet-stream' + AUDIO_ALAW = 'audio/alaw' + AUDIO_BASIC = 'audio/basic' + AUDIO_FLAC = 'audio/flac' + AUDIO_G729 = 'audio/g729' + AUDIO_L16 = 'audio/l16' + AUDIO_MP3 = 'audio/mp3' + AUDIO_MPEG = 'audio/mpeg' + AUDIO_MULAW = 'audio/mulaw' + AUDIO_OGG = 'audio/ogg' + AUDIO_OGG_CODECS_OPUS = 'audio/ogg;codecs=opus' + AUDIO_OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis' + AUDIO_WAV = 'audio/wav' + AUDIO_WEBM = 'audio/webm' + AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus' + AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis' + + class Model(str, Enum): + """ + The model to use for speech recognition. If you omit the `model` parameter, the + service uses the US English `en-US_BroadbandModel` by default. + _For IBM Cloud Pak for Data,_ if you do not install the `en-US_BroadbandModel`, + you must either specify a model with the request or specify a new default model + for your installation of the service. + **See also:** + * [Using a model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-use) + * [Using the default + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-use#models-use-default). 
+ """ + + AR_MS_BROADBANDMODEL = 'ar-MS_BroadbandModel' + AR_MS_TELEPHONY = 'ar-MS_Telephony' + CS_CZ_TELEPHONY = 'cs-CZ_Telephony' + DE_DE = 'de-DE' + DE_DE_BROADBANDMODEL = 'de-DE_BroadbandModel' + DE_DE_MULTIMEDIA = 'de-DE_Multimedia' + DE_DE_NARROWBANDMODEL = 'de-DE_NarrowbandModel' + DE_DE_TELEPHONY = 'de-DE_Telephony' + EN_AU = 'en-AU' + EN_AU_BROADBANDMODEL = 'en-AU_BroadbandModel' + EN_AU_MULTIMEDIA = 'en-AU_Multimedia' + EN_AU_NARROWBANDMODEL = 'en-AU_NarrowbandModel' + EN_AU_TELEPHONY = 'en-AU_Telephony' + EN_IN = 'en-IN' + EN_IN_TELEPHONY = 'en-IN_Telephony' + EN_GB = 'en-GB' + EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel' + EN_GB_MULTIMEDIA = 'en-GB_Multimedia' + EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel' + EN_GB_TELEPHONY = 'en-GB_Telephony' + EN_US = 'en-US' + EN_US_BROADBANDMODEL = 'en-US_BroadbandModel' + EN_US_MULTIMEDIA = 'en-US_Multimedia' + EN_US_NARROWBANDMODEL = 'en-US_NarrowbandModel' + EN_US_SHORTFORM_NARROWBANDMODEL = 'en-US_ShortForm_NarrowbandModel' + EN_US_TELEPHONY = 'en-US_Telephony' + EN_WW_MEDICAL_TELEPHONY = 'en-WW_Medical_Telephony' + ES_AR = 'es-AR' + ES_AR_BROADBANDMODEL = 'es-AR_BroadbandModel' + ES_AR_NARROWBANDMODEL = 'es-AR_NarrowbandModel' + ES_CL = 'es-CL' + ES_CL_BROADBANDMODEL = 'es-CL_BroadbandModel' + ES_CL_NARROWBANDMODEL = 'es-CL_NarrowbandModel' + ES_CO = 'es-CO' + ES_CO_BROADBANDMODEL = 'es-CO_BroadbandModel' + ES_CO_NARROWBANDMODEL = 'es-CO_NarrowbandModel' + ES_ES = 'es-ES' + ES_ES_BROADBANDMODEL = 'es-ES_BroadbandModel' + ES_ES_NARROWBANDMODEL = 'es-ES_NarrowbandModel' + ES_ES_MULTIMEDIA = 'es-ES_Multimedia' + ES_ES_TELEPHONY = 'es-ES_Telephony' + ES_LA_TELEPHONY = 'es-LA_Telephony' + ES_MX = 'es-MX' + ES_MX_BROADBANDMODEL = 'es-MX_BroadbandModel' + ES_MX_NARROWBANDMODEL = 'es-MX_NarrowbandModel' + ES_PE = 'es-PE' + ES_PE_BROADBANDMODEL = 'es-PE_BroadbandModel' + ES_PE_NARROWBANDMODEL = 'es-PE_NarrowbandModel' + FR_CA = 'fr-CA' + FR_CA_BROADBANDMODEL = 'fr-CA_BroadbandModel' + FR_CA_MULTIMEDIA = 'fr-CA_Multimedia' + FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel' + FR_CA_TELEPHONY = 'fr-CA_Telephony' + FR_FR = 'fr-FR' + FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel' + FR_FR_MULTIMEDIA = 'fr-FR_Multimedia' + FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel' + FR_FR_TELEPHONY = 'fr-FR_Telephony' + HI_IN_TELEPHONY = 'hi-IN_Telephony' + IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel' + IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel' + IT_IT_MULTIMEDIA = 'it-IT_Multimedia' + IT_IT_TELEPHONY = 'it-IT_Telephony' + JA_JP = 'ja-JP' + JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel' + JA_JP_MULTIMEDIA = 'ja-JP_Multimedia' + JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel' + JA_JP_TELEPHONY = 'ja-JP_Telephony' + KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel' + KO_KR_MULTIMEDIA = 'ko-KR_Multimedia' + KO_KR_NARROWBANDMODEL = 'ko-KR_NarrowbandModel' + KO_KR_TELEPHONY = 'ko-KR_Telephony' + NL_BE_TELEPHONY = 'nl-BE_Telephony' + NL_NL_BROADBANDMODEL = 'nl-NL_BroadbandModel' + NL_NL_MULTIMEDIA = 'nl-NL_Multimedia' + NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel' + NL_NL_TELEPHONY = 'nl-NL_Telephony' + PT_BR = 'pt-BR' + PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel' + PT_BR_MULTIMEDIA = 'pt-BR_Multimedia' + PT_BR_NARROWBANDMODEL = 'pt-BR_NarrowbandModel' + PT_BR_TELEPHONY = 'pt-BR_Telephony' + SV_SE_TELEPHONY = 'sv-SE_Telephony' + ZH_CN_BROADBANDMODEL = 'zh-CN_BroadbandModel' + ZH_CN_NARROWBANDMODEL = 'zh-CN_NarrowbandModel' + ZH_CN_TELEPHONY = 'zh-CN_Telephony' + + +class CreateJobEnums: + """ + Enums for create_job parameters. 
+ """ - :param list[AcousticModel] customizations: An array of `AcousticModel` objects - that provides information about each available custom acoustic model. The array is - empty if the requesting credentials own no custom acoustic models (if no language - is specified) or own no custom acoustic models for the specified language. + class ContentType(str, Enum): + """ + The format (MIME type) of the audio. For more information about specifying an + audio format, see **Audio formats (content types)** in the method description. + """ + + APPLICATION_OCTET_STREAM = 'application/octet-stream' + AUDIO_ALAW = 'audio/alaw' + AUDIO_BASIC = 'audio/basic' + AUDIO_FLAC = 'audio/flac' + AUDIO_G729 = 'audio/g729' + AUDIO_L16 = 'audio/l16' + AUDIO_MP3 = 'audio/mp3' + AUDIO_MPEG = 'audio/mpeg' + AUDIO_MULAW = 'audio/mulaw' + AUDIO_OGG = 'audio/ogg' + AUDIO_OGG_CODECS_OPUS = 'audio/ogg;codecs=opus' + AUDIO_OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis' + AUDIO_WAV = 'audio/wav' + AUDIO_WEBM = 'audio/webm' + AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus' + AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis' + + class Model(str, Enum): + """ + The model to use for speech recognition. If you omit the `model` parameter, the + service uses the US English `en-US_BroadbandModel` by default. + _For IBM Cloud Pak for Data,_ if you do not install the `en-US_BroadbandModel`, + you must either specify a model with the request or specify a new default model + for your installation of the service. + **See also:** + * [Using a model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-use) + * [Using the default + model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-use#models-use-default). + """ + + AR_MS_BROADBANDMODEL = 'ar-MS_BroadbandModel' + AR_MS_TELEPHONY = 'ar-MS_Telephony' + CS_CZ_TELEPHONY = 'cs-CZ_Telephony' + DE_DE = 'de-DE' + DE_DE_BROADBANDMODEL = 'de-DE_BroadbandModel' + DE_DE_MULTIMEDIA = 'de-DE_Multimedia' + DE_DE_NARROWBANDMODEL = 'de-DE_NarrowbandModel' + DE_DE_TELEPHONY = 'de-DE_Telephony' + EN_AU = 'en-AU' + EN_AU_BROADBANDMODEL = 'en-AU_BroadbandModel' + EN_AU_MULTIMEDIA = 'en-AU_Multimedia' + EN_AU_NARROWBANDMODEL = 'en-AU_NarrowbandModel' + EN_AU_TELEPHONY = 'en-AU_Telephony' + EN_IN = 'en-IN' + EN_IN_TELEPHONY = 'en-IN_Telephony' + EN_GB = 'en-GB' + EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel' + EN_GB_MULTIMEDIA = 'en-GB_Multimedia' + EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel' + EN_GB_TELEPHONY = 'en-GB_Telephony' + EN_US = 'en-US' + EN_US_BROADBANDMODEL = 'en-US_BroadbandModel' + EN_US_MULTIMEDIA = 'en-US_Multimedia' + EN_US_NARROWBANDMODEL = 'en-US_NarrowbandModel' + EN_US_SHORTFORM_NARROWBANDMODEL = 'en-US_ShortForm_NarrowbandModel' + EN_US_TELEPHONY = 'en-US_Telephony' + EN_WW_MEDICAL_TELEPHONY = 'en-WW_Medical_Telephony' + ES_AR = 'es-AR' + ES_AR_BROADBANDMODEL = 'es-AR_BroadbandModel' + ES_AR_NARROWBANDMODEL = 'es-AR_NarrowbandModel' + ES_CL = 'es-CL' + ES_CL_BROADBANDMODEL = 'es-CL_BroadbandModel' + ES_CL_NARROWBANDMODEL = 'es-CL_NarrowbandModel' + ES_CO = 'es-CO' + ES_CO_BROADBANDMODEL = 'es-CO_BroadbandModel' + ES_CO_NARROWBANDMODEL = 'es-CO_NarrowbandModel' + ES_ES = 'es-ES' + ES_ES_BROADBANDMODEL = 'es-ES_BroadbandModel' + ES_ES_NARROWBANDMODEL = 'es-ES_NarrowbandModel' + ES_ES_MULTIMEDIA = 'es-ES_Multimedia' + ES_ES_TELEPHONY = 'es-ES_Telephony' + ES_LA_TELEPHONY = 'es-LA_Telephony' + ES_MX = 'es-MX' + ES_MX_BROADBANDMODEL = 'es-MX_BroadbandModel' + ES_MX_NARROWBANDMODEL = 'es-MX_NarrowbandModel' + ES_PE = 
'es-PE' + ES_PE_BROADBANDMODEL = 'es-PE_BroadbandModel' + ES_PE_NARROWBANDMODEL = 'es-PE_NarrowbandModel' + FR_CA = 'fr-CA' + FR_CA_BROADBANDMODEL = 'fr-CA_BroadbandModel' + FR_CA_MULTIMEDIA = 'fr-CA_Multimedia' + FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel' + FR_CA_TELEPHONY = 'fr-CA_Telephony' + FR_FR = 'fr-FR' + FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel' + FR_FR_MULTIMEDIA = 'fr-FR_Multimedia' + FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel' + FR_FR_TELEPHONY = 'fr-FR_Telephony' + HI_IN_TELEPHONY = 'hi-IN_Telephony' + IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel' + IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel' + IT_IT_MULTIMEDIA = 'it-IT_Multimedia' + IT_IT_TELEPHONY = 'it-IT_Telephony' + JA_JP = 'ja-JP' + JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel' + JA_JP_MULTIMEDIA = 'ja-JP_Multimedia' + JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel' + JA_JP_TELEPHONY = 'ja-JP_Telephony' + KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel' + KO_KR_MULTIMEDIA = 'ko-KR_Multimedia' + KO_KR_NARROWBANDMODEL = 'ko-KR_NarrowbandModel' + KO_KR_TELEPHONY = 'ko-KR_Telephony' + NL_BE_TELEPHONY = 'nl-BE_Telephony' + NL_NL_BROADBANDMODEL = 'nl-NL_BroadbandModel' + NL_NL_MULTIMEDIA = 'nl-NL_Multimedia' + NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel' + NL_NL_TELEPHONY = 'nl-NL_Telephony' + PT_BR = 'pt-BR' + PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel' + PT_BR_MULTIMEDIA = 'pt-BR_Multimedia' + PT_BR_NARROWBANDMODEL = 'pt-BR_NarrowbandModel' + PT_BR_TELEPHONY = 'pt-BR_Telephony' + SV_SE_TELEPHONY = 'sv-SE_Telephony' + ZH_CN_BROADBANDMODEL = 'zh-CN_BroadbandModel' + ZH_CN_NARROWBANDMODEL = 'zh-CN_NarrowbandModel' + ZH_CN_TELEPHONY = 'zh-CN_Telephony' + + class Events(str, Enum): + """ + If the job includes a callback URL, a comma-separated list of notification events + to which to subscribe. Valid events are + * `recognitions.started` generates a callback notification when the service begins + to process the job. + * `recognitions.completed` generates a callback notification when the job is + complete. You must use the [Check a job](#checkjob) method to retrieve the results + before they time out or are deleted. + * `recognitions.completed_with_results` generates a callback notification when the + job is complete. The notification includes the results of the request. + * `recognitions.failed` generates a callback notification if the service + experiences an error while processing the job. + The `recognitions.completed` and `recognitions.completed_with_results` events are + incompatible. You can specify only of the two events. + If the job includes a callback URL, omit the parameter to subscribe to the default + events: `recognitions.started`, `recognitions.completed`, and + `recognitions.failed`. If the job does not include a callback URL, omit the + parameter. """ - self.customizations = customizations - @classmethod - def _from_dict(cls, _dict): - """Initialize a AcousticModels object from a json dictionary.""" - args = {} - if 'customizations' in _dict: + RECOGNITIONS_STARTED = 'recognitions.started' + RECOGNITIONS_COMPLETED = 'recognitions.completed' + RECOGNITIONS_COMPLETED_WITH_RESULTS = 'recognitions.completed_with_results' + RECOGNITIONS_FAILED = 'recognitions.failed' + + +class ListLanguageModelsEnums: + """ + Enums for list_language_models parameters. + """ + + class Language(str, Enum): + """ + The identifier of the language for which custom language or custom acoustic models + are to be returned. 
Specify the five-character language identifier; for example, + specify `en-US` to see all custom language or custom acoustic models that are + based on US English models. Omit the parameter to see all custom language or + custom acoustic models that are owned by the requesting credentials. + To determine the languages for which customization is available, see [Language + support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). + """ + + AR_MS = 'ar-MS' + CS_CZ = 'cs-CZ' + DE_DE = 'de-DE' + EN_AU = 'en-AU' + EN_GB = 'en-GB' + EN_IN = 'en-IN' + EN_US = 'en-US' + EN_WW = 'en-WW' + ES_AR = 'es-AR' + ES_CL = 'es-CL' + ES_CO = 'es-CO' + ES_ES = 'es-ES' + ES_LA = 'es-LA' + ES_MX = 'es-MX' + ES_PE = 'es-PE' + FR_CA = 'fr-CA' + FR_FR = 'fr-FR' + HI_IN = 'hi-IN' + IT_IT = 'it-IT' + JA_JP = 'ja-JP' + KO_KR = 'ko-KR' + NL_BE = 'nl-BE' + NL_NL = 'nl-NL' + PT_BR = 'pt-BR' + SV_SE = 'sv-SE' + ZH_CN = 'zh-CN' + + +class TrainLanguageModelEnums: + """ + Enums for train_language_model parameters. + """ + + class WordTypeToAdd(str, Enum): + """ + _For custom models that are based on previous-generation models_, the type of + words from the custom language model's words resource on which to train the model: + * `all` (the default) trains the model on all new words, regardless of whether + they were extracted from corpora or grammars or were added or modified by the + user. + * `user` trains the model only on custom words that were added or modified by the + user directly. The model is not trained on new words extracted from corpora or + grammars. + _For custom models that are based on large speech models and next-generation + models_, the service ignores the `word_type_to_add` parameter. The words resource + contains only custom words that the user adds or modifies directly, so the + parameter is unnecessary. + """ + + ALL = 'all' + USER = 'user' + + +class ListWordsEnums: + """ + Enums for list_words parameters. + """ + + class WordType(str, Enum): + """ + The type of words to be listed from the custom language model's words resource: + * `all` (the default) shows all words. + * `user` shows only custom words that were added or modified by the user directly. + * `corpora` shows only OOV that were extracted from corpora. + * `grammars` shows only OOV words that are recognized by grammars. + _For a custom model that is based on a next-generation model_, only `all` and + `user` apply. Both options return the same results. Words from other sources are + not added to custom models that are based on next-generation models. + """ + + ALL = 'all' + USER = 'user' + CORPORA = 'corpora' + GRAMMARS = 'grammars' + + class Sort(str, Enum): + """ + Indicates the order in which the words are to be listed, `alphabetical` or by + `count`. You can prepend an optional `+` or `-` to an argument to indicate whether + the results are to be sorted in ascending or descending order. By default, words + are sorted in ascending alphabetical order. For alphabetical ordering, the + lexicographical precedence is numeric values, uppercase letters, and lowercase + letters. For count ordering, values with the same count are ordered + alphabetically. With the `curl` command, URL-encode the `+` symbol as `%2B`. + """ + + ALPHABETICAL = 'alphabetical' + COUNT = 'count' + + +class AddGrammarEnums: + """ + Enums for add_grammar parameters. 
+ """ + + class ContentType(str, Enum): + """ + The format (MIME type) of the grammar file: + * `application/srgs` for Augmented Backus-Naur Form (ABNF), which uses a + plain-text representation that is similar to traditional BNF grammars. + * `application/srgs+xml` for XML Form, which uses XML elements to represent the + grammar. + """ + + APPLICATION_SRGS = 'application/srgs' + APPLICATION_SRGS_XML = 'application/srgs+xml' + + +class ListAcousticModelsEnums: + """ + Enums for list_acoustic_models parameters. + """ + + class Language(str, Enum): + """ + The identifier of the language for which custom language or custom acoustic models + are to be returned. Specify the five-character language identifier; for example, + specify `en-US` to see all custom language or custom acoustic models that are + based on US English models. Omit the parameter to see all custom language or + custom acoustic models that are owned by the requesting credentials. + To determine the languages for which customization is available, see [Language + support for + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support). + """ + + AR_MS = 'ar-MS' + CS_CZ = 'cs-CZ' + DE_DE = 'de-DE' + EN_AU = 'en-AU' + EN_GB = 'en-GB' + EN_IN = 'en-IN' + EN_US = 'en-US' + EN_WW = 'en-WW' + ES_AR = 'es-AR' + ES_CL = 'es-CL' + ES_CO = 'es-CO' + ES_ES = 'es-ES' + ES_LA = 'es-LA' + ES_MX = 'es-MX' + ES_PE = 'es-PE' + FR_CA = 'fr-CA' + FR_FR = 'fr-FR' + HI_IN = 'hi-IN' + IT_IT = 'it-IT' + JA_JP = 'ja-JP' + KO_KR = 'ko-KR' + NL_BE = 'nl-BE' + NL_NL = 'nl-NL' + PT_BR = 'pt-BR' + SV_SE = 'sv-SE' + ZH_CN = 'zh-CN' + + +class AddAudioEnums: + """ + Enums for add_audio parameters. + """ + + class ContentType(str, Enum): + """ + For an audio-type resource, the format (MIME type) of the audio. For more + information, see **Content types for audio-type resources** in the method + description. + For an archive-type resource, the media type of the archive file. For more + information, see **Content types for archive-type resources** in the method + description. + """ + + APPLICATION_ZIP = 'application/zip' + APPLICATION_GZIP = 'application/gzip' + AUDIO_ALAW = 'audio/alaw' + AUDIO_BASIC = 'audio/basic' + AUDIO_FLAC = 'audio/flac' + AUDIO_G729 = 'audio/g729' + AUDIO_L16 = 'audio/l16' + AUDIO_MP3 = 'audio/mp3' + AUDIO_MPEG = 'audio/mpeg' + AUDIO_MULAW = 'audio/mulaw' + AUDIO_OGG = 'audio/ogg' + AUDIO_OGG_CODECS_OPUS = 'audio/ogg;codecs=opus' + AUDIO_OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis' + AUDIO_WAV = 'audio/wav' + AUDIO_WEBM = 'audio/webm' + AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus' + AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis' + + class ContainedContentType(str, Enum): + """ + _For an archive-type resource_, specify the format of the audio files that are + contained in the archive file if they are of type `audio/alaw`, `audio/basic`, + `audio/l16`, or `audio/mulaw`. Include the `rate`, `channels`, and `endianness` + parameters where necessary. In this case, all audio files that are contained in + the archive file must be of the indicated type. + For all other audio formats, you can omit the header. In this case, the audio + files can be of multiple types as long as they are not of the types listed in the + previous paragraph. + The parameter accepts all of the audio formats that are supported for use with + speech recognition. For more information, see **Content types for audio-type + resources** in the method description. + _For an audio-type resource_, omit the header. 
+ """ + + AUDIO_ALAW = 'audio/alaw' + AUDIO_BASIC = 'audio/basic' + AUDIO_FLAC = 'audio/flac' + AUDIO_G729 = 'audio/g729' + AUDIO_L16 = 'audio/l16' + AUDIO_MP3 = 'audio/mp3' + AUDIO_MPEG = 'audio/mpeg' + AUDIO_MULAW = 'audio/mulaw' + AUDIO_OGG = 'audio/ogg' + AUDIO_OGG_CODECS_OPUS = 'audio/ogg;codecs=opus' + AUDIO_OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis' + AUDIO_WAV = 'audio/wav' + AUDIO_WEBM = 'audio/webm' + AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus' + AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis' + + +class DetectLanguageEnums: + """ + Enums for detect_language parameters. + """ + + class ContentType(str, Enum): + """ + The type of the input. + """ + + APPLICATION_OCTET_STREAM = 'application/octet-stream' + AUDIO_ALAW = 'audio/alaw' + AUDIO_BASIC = 'audio/basic' + AUDIO_FLAC = 'audio/flac' + AUDIO_G729 = 'audio/g729' + AUDIO_L16 = 'audio/l16' + AUDIO_MP3 = 'audio/mp3' + AUDIO_MPEG = 'audio/mpeg' + AUDIO_MULAW = 'audio/mulaw' + AUDIO_OGG = 'audio/ogg' + AUDIO_OGG_CODECS_OPUS = 'audio/ogg;codecs=opus' + AUDIO_OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis' + AUDIO_WAV = 'audio/wav' + AUDIO_WEBM = 'audio/webm' + AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus' + AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis' + + +############################################################################## +# Models +############################################################################## + + +class AcousticModel: + """ + Information about an existing custom acoustic model. + + :param str customization_id: The customization ID (GUID) of the custom acoustic + model. The [Create a custom acoustic model](#createacousticmodel) method returns + only this field of the object; it does not return the other fields. + :param str created: (optional) The date and time in Coordinated Universal Time + (UTC) at which the custom acoustic model was created. The value is provided in + full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str updated: (optional) The date and time in Coordinated Universal Time + (UTC) at which the custom acoustic model was last modified. The `created` and + `updated` fields are equal when an acoustic model is first added but has yet to + be updated. The value is provided in full ISO 8601 format + (YYYY-MM-DDThh:mm:ss.sTZD). + :param str language: (optional) The language identifier of the custom acoustic + model (for example, `en-US`). + :param List[str] versions: (optional) A list of the available versions of the + custom acoustic model. Each element of the array indicates a version of the base + model with which the custom model can be used. Multiple versions exist only if + the custom model has been upgraded to a new version of its base model. + Otherwise, only a single version is shown. + :param str owner: (optional) The GUID of the credentials for the instance of the + service that owns the custom acoustic model. + :param str name: (optional) The name of the custom acoustic model. + :param str description: (optional) The description of the custom acoustic model. + :param str base_model_name: (optional) The name of the language model for which + the custom acoustic model was created. + :param str status: (optional) The current status of the custom acoustic model: + * `pending`: The model was created but is waiting either for valid training data + to be added or for the service to finish analyzing added data. + * `ready`: The model contains valid data and is ready to be trained. 
If the + model contains a mix of valid and invalid resources, you need to set the + `strict` parameter to `false` for the training to proceed. + * `training`: The model is currently being trained. + * `available`: The model is trained and ready to use. + * `upgrading`: The model is currently being upgraded. + * `failed`: Training of the model failed. + :param int progress: (optional) A percentage that indicates the progress of the + custom acoustic model's current training. A value of `100` means that the model + is fully trained. **Note:** The `progress` field does not currently reflect the + progress of the training. The field changes from `0` to `100` when training is + complete. + :param str warnings: (optional) If the request included unknown parameters, the + following message: `Unexpected query parameter(s) ['parameters'] detected`, + where `parameters` is a list that includes a quoted string for each unknown + parameter. + """ + + def __init__( + self, + customization_id: str, + *, + created: Optional[str] = None, + updated: Optional[str] = None, + language: Optional[str] = None, + versions: Optional[List[str]] = None, + owner: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + base_model_name: Optional[str] = None, + status: Optional[str] = None, + progress: Optional[int] = None, + warnings: Optional[str] = None, + ) -> None: + """ + Initialize a AcousticModel object. + + :param str customization_id: The customization ID (GUID) of the custom + acoustic model. The [Create a custom acoustic model](#createacousticmodel) + method returns only this field of the object; it does not return the other + fields. + :param str created: (optional) The date and time in Coordinated Universal + Time (UTC) at which the custom acoustic model was created. The value is + provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str updated: (optional) The date and time in Coordinated Universal + Time (UTC) at which the custom acoustic model was last modified. The + `created` and `updated` fields are equal when an acoustic model is first + added but has yet to be updated. The value is provided in full ISO 8601 + format (YYYY-MM-DDThh:mm:ss.sTZD). + :param str language: (optional) The language identifier of the custom + acoustic model (for example, `en-US`). + :param List[str] versions: (optional) A list of the available versions of + the custom acoustic model. Each element of the array indicates a version of + the base model with which the custom model can be used. Multiple versions + exist only if the custom model has been upgraded to a new version of its + base model. Otherwise, only a single version is shown. + :param str owner: (optional) The GUID of the credentials for the instance + of the service that owns the custom acoustic model. + :param str name: (optional) The name of the custom acoustic model. + :param str description: (optional) The description of the custom acoustic + model. + :param str base_model_name: (optional) The name of the language model for + which the custom acoustic model was created. + :param str status: (optional) The current status of the custom acoustic + model: + * `pending`: The model was created but is waiting either for valid training + data to be added or for the service to finish analyzing added data. + * `ready`: The model contains valid data and is ready to be trained. If the + model contains a mix of valid and invalid resources, you need to set the + `strict` parameter to `false` for the training to proceed. 
+ * `training`: The model is currently being trained. + * `available`: The model is trained and ready to use. + * `upgrading`: The model is currently being upgraded. + * `failed`: Training of the model failed. + :param int progress: (optional) A percentage that indicates the progress of + the custom acoustic model's current training. A value of `100` means that + the model is fully trained. **Note:** The `progress` field does not + currently reflect the progress of the training. The field changes from `0` + to `100` when training is complete. + :param str warnings: (optional) If the request included unknown parameters, + the following message: `Unexpected query parameter(s) ['parameters'] + detected`, where `parameters` is a list that includes a quoted string for + each unknown parameter. + """ + self.customization_id = customization_id + self.created = created + self.updated = updated + self.language = language + self.versions = versions + self.owner = owner + self.name = name + self.description = description + self.base_model_name = base_model_name + self.status = status + self.progress = progress + self.warnings = warnings + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AcousticModel': + """Initialize a AcousticModel object from a json dictionary.""" + args = {} + if (customization_id := _dict.get('customization_id')) is not None: + args['customization_id'] = customization_id + else: + raise ValueError( + 'Required property \'customization_id\' not present in AcousticModel JSON' + ) + if (created := _dict.get('created')) is not None: + args['created'] = created + if (updated := _dict.get('updated')) is not None: + args['updated'] = updated + if (language := _dict.get('language')) is not None: + args['language'] = language + if (versions := _dict.get('versions')) is not None: + args['versions'] = versions + if (owner := _dict.get('owner')) is not None: + args['owner'] = owner + if (name := _dict.get('name')) is not None: + args['name'] = name + if (description := _dict.get('description')) is not None: + args['description'] = description + if (base_model_name := _dict.get('base_model_name')) is not None: + args['base_model_name'] = base_model_name + if (status := _dict.get('status')) is not None: + args['status'] = status + if (progress := _dict.get('progress')) is not None: + args['progress'] = progress + if (warnings := _dict.get('warnings')) is not None: + args['warnings'] = warnings + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AcousticModel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'customization_id') and self.customization_id is not None: + _dict['customization_id'] = self.customization_id + if hasattr(self, 'created') and self.created is not None: + _dict['created'] = self.created + if hasattr(self, 'updated') and self.updated is not None: + _dict['updated'] = self.updated + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'versions') and self.versions is not None: + _dict['versions'] = self.versions + if hasattr(self, 'owner') and self.owner is not None: + _dict['owner'] = self.owner + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, + 'base_model_name') and 
self.base_model_name is not None: + _dict['base_model_name'] = self.base_model_name + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'progress') and self.progress is not None: + _dict['progress'] = self.progress + if hasattr(self, 'warnings') and self.warnings is not None: + _dict['warnings'] = self.warnings + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AcousticModel object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AcousticModel') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AcousticModel') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The current status of the custom acoustic model: + * `pending`: The model was created but is waiting either for valid training data + to be added or for the service to finish analyzing added data. + * `ready`: The model contains valid data and is ready to be trained. If the model + contains a mix of valid and invalid resources, you need to set the `strict` + parameter to `false` for the training to proceed. + * `training`: The model is currently being trained. + * `available`: The model is trained and ready to use. + * `upgrading`: The model is currently being upgraded. + * `failed`: Training of the model failed. + """ + + PENDING = 'pending' + READY = 'ready' + TRAINING = 'training' + AVAILABLE = 'available' + UPGRADING = 'upgrading' + FAILED = 'failed' + + +class AcousticModels: + """ + Information about existing custom acoustic models. + + :param List[AcousticModel] customizations: An array of `AcousticModel` objects + that provides information about each available custom acoustic model. The array + is empty if the requesting credentials own no custom acoustic models (if no + language is specified) or own no custom acoustic models for the specified + language. + """ + + def __init__( + self, + customizations: List['AcousticModel'], + ) -> None: + """ + Initialize a AcousticModels object. + + :param List[AcousticModel] customizations: An array of `AcousticModel` + objects that provides information about each available custom acoustic + model. The array is empty if the requesting credentials own no custom + acoustic models (if no language is specified) or own no custom acoustic + models for the specified language. 
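As a rough sketch of how this container is typically consumed, the snippet below parses a hypothetical "list acoustic models" response; the response values (ID, names) are illustrative only, not output from a real service call.

# Hypothetical response body from the "List custom acoustic models" method.
response = {
    'customizations': [{
        'customization_id': '74f4807e-b5ff-4866-824e-6bba1a84fe96',  # illustrative GUID
        'name': 'Example acoustic model',
        'base_model_name': 'en-US_BroadbandModel',
        'status': 'ready',
    }]
}
models = AcousticModels.from_dict(response)
for model in models.customizations:
    # StatusEnum members mirror the documented status strings.
    if model.status == AcousticModel.StatusEnum.READY:
        print(f'{model.name} ({model.customization_id}) is ready to be trained')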
+ """ + self.customizations = customizations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AcousticModels': + """Initialize a AcousticModels object from a json dictionary.""" + args = {} + if (customizations := _dict.get('customizations')) is not None: args['customizations'] = [ - AcousticModel._from_dict(x) - for x in (_dict.get('customizations')) + AcousticModel.from_dict(v) for v in customizations ] else: raise ValueError( @@ -3036,70 +5348,94 @@ def _from_dict(cls, _dict): ) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a AcousticModels object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'customizations') and self.customizations is not None: - _dict['customizations'] = [ - x._to_dict() for x in self.customizations - ] + customizations_list = [] + for v in self.customizations: + if isinstance(v, dict): + customizations_list.append(v) + else: + customizations_list.append(v.to_dict()) + _dict['customizations'] = customizations_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this AcousticModels object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'AcousticModels') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'AcousticModels') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class AudioDetails(object): +class AudioDetails: """ - AudioDetails. - - :attr str type: (optional) The type of the audio resource: - * `audio` for an individual audio file - * `archive` for an archive (**.zip** or **.tar.gz**) file that contains audio files - * `undetermined` for a resource that the service cannot validate (for example, if the - user mistakenly passes a file that does not contain audio, such as a JPEG file). - :attr str codec: (optional) **For an audio-type resource,** the codec in which the - audio is encoded. Omitted for an archive-type resource. - :attr int frequency: (optional) **For an audio-type resource,** the sampling rate of - the audio in Hertz (samples per second). Omitted for an archive-type resource. - :attr str compression: (optional) **For an archive-type resource,** the format of the - compressed archive: - * `zip` for a **.zip** file - * `gzip` for a **.tar.gz** file - Omitted for an audio-type resource. + Information about an audio resource from a custom acoustic model. + + :param str type: (optional) The type of the audio resource: + * `audio` for an individual audio file + * `archive` for an archive (**.zip** or **.tar.gz**) file that contains audio + files + * `undetermined` for a resource that the service cannot validate (for example, + if the user mistakenly passes a file that does not contain audio, such as a JPEG + file). + :param str codec: (optional) _For an audio-type resource_, the codec in which + the audio is encoded. Omitted for an archive-type resource. + :param int frequency: (optional) _For an audio-type resource_, the sampling rate + of the audio in Hertz (samples per second). 
Omitted for an archive-type + resource. + :param str compression: (optional) _For an archive-type resource_, the format of + the compressed archive: + * `zip` for a **.zip** file + * `gzip` for a **.tar.gz** file + Omitted for an audio-type resource. """ - def __init__(self, type=None, codec=None, frequency=None, compression=None): + def __init__( + self, + *, + type: Optional[str] = None, + codec: Optional[str] = None, + frequency: Optional[int] = None, + compression: Optional[str] = None, + ) -> None: """ Initialize a AudioDetails object. :param str type: (optional) The type of the audio resource: - * `audio` for an individual audio file - * `archive` for an archive (**.zip** or **.tar.gz**) file that contains audio - files - * `undetermined` for a resource that the service cannot validate (for example, if - the user mistakenly passes a file that does not contain audio, such as a JPEG - file). - :param str codec: (optional) **For an audio-type resource,** the codec in which - the audio is encoded. Omitted for an archive-type resource. - :param int frequency: (optional) **For an audio-type resource,** the sampling rate - of the audio in Hertz (samples per second). Omitted for an archive-type resource. - :param str compression: (optional) **For an archive-type resource,** the format of - the compressed archive: - * `zip` for a **.zip** file - * `gzip` for a **.tar.gz** file - Omitted for an audio-type resource. + * `audio` for an individual audio file + * `archive` for an archive (**.zip** or **.tar.gz**) file that contains + audio files + * `undetermined` for a resource that the service cannot validate (for + example, if the user mistakenly passes a file that does not contain audio, + such as a JPEG file). + :param str codec: (optional) _For an audio-type resource_, the codec in + which the audio is encoded. Omitted for an archive-type resource. + :param int frequency: (optional) _For an audio-type resource_, the sampling + rate of the audio in Hertz (samples per second). Omitted for an + archive-type resource. + :param str compression: (optional) _For an archive-type resource_, the + format of the compressed archive: + * `zip` for a **.zip** file + * `gzip` for a **.tar.gz** file + Omitted for an audio-type resource. 
""" self.type = type self.codec = codec @@ -3107,20 +5443,25 @@ def __init__(self, type=None, codec=None, frequency=None, compression=None): self.compression = compression @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'AudioDetails': """Initialize a AudioDetails object from a json dictionary.""" args = {} - if 'type' in _dict: - args['type'] = _dict.get('type') - if 'codec' in _dict: - args['codec'] = _dict.get('codec') - if 'frequency' in _dict: - args['frequency'] = _dict.get('frequency') - if 'compression' in _dict: - args['compression'] = _dict.get('compression') + if (type := _dict.get('type')) is not None: + args['type'] = type + if (codec := _dict.get('codec')) is not None: + args['codec'] = codec + if (frequency := _dict.get('frequency')) is not None: + args['frequency'] = frequency + if (compression := _dict.get('compression')) is not None: + args['compression'] = compression return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a AudioDetails object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'type') and self.type is not None: @@ -3133,85 +5474,121 @@ def _to_dict(self): _dict['compression'] = self.compression return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this AudioDetails object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'AudioDetails') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'AudioDetails') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class TypeEnum(str, Enum): + """ + The type of the audio resource: + * `audio` for an individual audio file + * `archive` for an archive (**.zip** or **.tar.gz**) file that contains audio + files + * `undetermined` for a resource that the service cannot validate (for example, if + the user mistakenly passes a file that does not contain audio, such as a JPEG + file). + """ + + AUDIO = 'audio' + ARCHIVE = 'archive' + UNDETERMINED = 'undetermined' + + class CompressionEnum(str, Enum): + """ + _For an archive-type resource_, the format of the compressed archive: + * `zip` for a **.zip** file + * `gzip` for a **.tar.gz** file + Omitted for an audio-type resource. + """ + + ZIP = 'zip' + GZIP = 'gzip' + -class AudioListing(object): +class AudioListing: """ - AudioListing. - - :attr int duration: (optional) **For an audio-type resource,** the total seconds of - audio in the resource. Omitted for an archive-type resource. - :attr str name: (optional) **For an audio-type resource,** the user-specified name of - the resource. Omitted for an archive-type resource. - :attr AudioDetails details: (optional) **For an audio-type resource,** an - `AudioDetails` object that provides detailed information about the resource. The - object is empty until the service finishes processing the audio. Omitted for an - archive-type resource. 
- :attr str status: (optional) **For an audio-type resource,** the status of the - resource: - * `ok`: The service successfully analyzed the audio data. The data can be used to - train the custom model. - * `being_processed`: The service is still analyzing the audio data. The service cannot - accept requests to add new audio resources or to train the custom model until its - analysis is complete. - * `invalid`: The audio data is not valid for training the custom model (possibly - because it has the wrong format or sampling rate, or because it is corrupted). - Omitted for an archive-type resource. - :attr AudioResource container: (optional) **For an archive-type resource,** an object - of type `AudioResource` that provides information about the resource. Omitted for an - audio-type resource. - :attr list[AudioResource] audio: (optional) **For an archive-type resource,** an array - of `AudioResource` objects that provides information about the audio-type resources - that are contained in the resource. Omitted for an audio-type resource. + Information about an audio resource from a custom acoustic model. + + :param int duration: (optional) _For an audio-type resource_, the total seconds + of audio in the resource. Omitted for an archive-type resource. + :param str name: (optional) _For an audio-type resource_, the user-specified + name of the resource. Omitted for an archive-type resource. + :param AudioDetails details: (optional) _For an audio-type resource_, an + `AudioDetails` object that provides detailed information about the resource. The + object is empty until the service finishes processing the audio. Omitted for an + archive-type resource. + :param str status: (optional) _For an audio-type resource_, the status of the + resource: + * `ok`: The service successfully analyzed the audio data. The data can be used + to train the custom model. + * `being_processed`: The service is still analyzing the audio data. The service + cannot accept requests to add new audio resources or to train the custom model + until its analysis is complete. + * `invalid`: The audio data is not valid for training the custom model (possibly + because it has the wrong format or sampling rate, or because it is corrupted). + Omitted for an archive-type resource. + :param AudioResource container: (optional) _For an archive-type resource_, an + object of type `AudioResource` that provides information about the resource. + Omitted for an audio-type resource. + :param List[AudioResource] audio: (optional) _For an archive-type resource_, an + array of `AudioResource` objects that provides information about the audio-type + resources that are contained in the resource. Omitted for an audio-type + resource. """ - def __init__(self, - duration=None, - name=None, - details=None, - status=None, - container=None, - audio=None): + def __init__( + self, + *, + duration: Optional[int] = None, + name: Optional[str] = None, + details: Optional['AudioDetails'] = None, + status: Optional[str] = None, + container: Optional['AudioResource'] = None, + audio: Optional[List['AudioResource']] = None, + ) -> None: """ Initialize a AudioListing object. - :param int duration: (optional) **For an audio-type resource,** the total seconds - of audio in the resource. Omitted for an archive-type resource. - :param str name: (optional) **For an audio-type resource,** the user-specified - name of the resource. Omitted for an archive-type resource. 
- :param AudioDetails details: (optional) **For an audio-type resource,** an - `AudioDetails` object that provides detailed information about the resource. The - object is empty until the service finishes processing the audio. Omitted for an - archive-type resource. - :param str status: (optional) **For an audio-type resource,** the status of the - resource: - * `ok`: The service successfully analyzed the audio data. The data can be used to - train the custom model. - * `being_processed`: The service is still analyzing the audio data. The service - cannot accept requests to add new audio resources or to train the custom model - until its analysis is complete. - * `invalid`: The audio data is not valid for training the custom model (possibly - because it has the wrong format or sampling rate, or because it is corrupted). - Omitted for an archive-type resource. - :param AudioResource container: (optional) **For an archive-type resource,** an - object of type `AudioResource` that provides information about the resource. - Omitted for an audio-type resource. - :param list[AudioResource] audio: (optional) **For an archive-type resource,** an - array of `AudioResource` objects that provides information about the audio-type - resources that are contained in the resource. Omitted for an audio-type resource. + :param int duration: (optional) _For an audio-type resource_, the total + seconds of audio in the resource. Omitted for an archive-type resource. + :param str name: (optional) _For an audio-type resource_, the + user-specified name of the resource. Omitted for an archive-type resource. + :param AudioDetails details: (optional) _For an audio-type resource_, an + `AudioDetails` object that provides detailed information about the + resource. The object is empty until the service finishes processing the + audio. Omitted for an archive-type resource. + :param str status: (optional) _For an audio-type resource_, the status of + the resource: + * `ok`: The service successfully analyzed the audio data. The data can be + used to train the custom model. + * `being_processed`: The service is still analyzing the audio data. The + service cannot accept requests to add new audio resources or to train the + custom model until its analysis is complete. + * `invalid`: The audio data is not valid for training the custom model + (possibly because it has the wrong format or sampling rate, or because it + is corrupted). + Omitted for an archive-type resource. + :param AudioResource container: (optional) _For an archive-type resource_, + an object of type `AudioResource` that provides information about the + resource. Omitted for an audio-type resource. + :param List[AudioResource] audio: (optional) _For an archive-type + resource_, an array of `AudioResource` objects that provides information + about the audio-type resources that are contained in the resource. Omitted + for an audio-type resource. 
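As a minimal sketch (assuming an `AudioListing` instance obtained elsewhere, for example via `AudioListing.from_dict` on a service response), a caller might branch on the two resource types like this:

def describe_listing(listing: AudioListing) -> str:
    # Archive-type resources report a `container` and an `audio` array;
    # audio-type resources report `name`, `duration`, and `details` instead.
    if listing.container is not None:
        count = len(listing.audio or [])
        return f"archive '{listing.container.name}' containing {count} audio file(s)"
    codec = listing.details.codec if listing.details else 'unknown'
    return f"audio '{listing.name}' ({listing.duration or 0} s, codec {codec})"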
""" self.duration = duration self.name = name @@ -3221,383 +5598,1444 @@ def __init__(self, self.audio = audio @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'AudioListing': """Initialize a AudioListing object from a json dictionary.""" args = {} - if 'duration' in _dict: - args['duration'] = _dict.get('duration') - if 'name' in _dict: - args['name'] = _dict.get('name') - if 'details' in _dict: - args['details'] = AudioDetails._from_dict(_dict.get('details')) - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'container' in _dict: - args['container'] = AudioResource._from_dict(_dict.get('container')) - if 'audio' in _dict: - args['audio'] = [ - AudioResource._from_dict(x) for x in (_dict.get('audio')) - ] + if (duration := _dict.get('duration')) is not None: + args['duration'] = duration + if (name := _dict.get('name')) is not None: + args['name'] = name + if (details := _dict.get('details')) is not None: + args['details'] = AudioDetails.from_dict(details) + if (status := _dict.get('status')) is not None: + args['status'] = status + if (container := _dict.get('container')) is not None: + args['container'] = AudioResource.from_dict(container) + if (audio := _dict.get('audio')) is not None: + args['audio'] = [AudioResource.from_dict(v) for v in audio] return cls(**args) + @classmethod + def _from_dict(cls, _dict): + """Initialize a AudioListing object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'duration') and self.duration is not None: + _dict['duration'] = self.duration + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'details') and self.details is not None: + if isinstance(self.details, dict): + _dict['details'] = self.details + else: + _dict['details'] = self.details.to_dict() + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'container') and self.container is not None: + if isinstance(self.container, dict): + _dict['container'] = self.container + else: + _dict['container'] = self.container.to_dict() + if hasattr(self, 'audio') and self.audio is not None: + audio_list = [] + for v in self.audio: + if isinstance(v, dict): + audio_list.append(v) + else: + audio_list.append(v.to_dict()) + _dict['audio'] = audio_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AudioListing object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AudioListing') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AudioListing') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + _For an audio-type resource_, the status of the resource: + * `ok`: The service successfully analyzed the audio data. The data can be used to + train the custom model. + * `being_processed`: The service is still analyzing the audio data. The service + cannot accept requests to add new audio resources or to train the custom model + until its analysis is complete. 
+ * `invalid`: The audio data is not valid for training the custom model (possibly + because it has the wrong format or sampling rate, or because it is corrupted). + Omitted for an archive-type resource. + """ + + OK = 'ok' + BEING_PROCESSED = 'being_processed' + INVALID = 'invalid' + + +class AudioMetrics: + """ + If audio metrics are requested, information about the signal characteristics of the + input audio. + + :param float sampling_interval: The interval in seconds (typically 0.1 seconds) + at which the service calculated the audio metrics. In other words, how often the + service calculated the metrics. A single unit in each histogram (see the + `AudioMetricsHistogramBin` object) is calculated based on a `sampling_interval` + length of audio. + :param AudioMetricsDetails accumulated: Detailed information about the signal + characteristics of the input audio. + """ + + def __init__( + self, + sampling_interval: float, + accumulated: 'AudioMetricsDetails', + ) -> None: + """ + Initialize a AudioMetrics object. + + :param float sampling_interval: The interval in seconds (typically 0.1 + seconds) at which the service calculated the audio metrics. In other words, + how often the service calculated the metrics. A single unit in each + histogram (see the `AudioMetricsHistogramBin` object) is calculated based + on a `sampling_interval` length of audio. + :param AudioMetricsDetails accumulated: Detailed information about the + signal characteristics of the input audio. + """ + self.sampling_interval = sampling_interval + self.accumulated = accumulated + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AudioMetrics': + """Initialize a AudioMetrics object from a json dictionary.""" + args = {} + if (sampling_interval := _dict.get('sampling_interval')) is not None: + args['sampling_interval'] = sampling_interval + else: + raise ValueError( + 'Required property \'sampling_interval\' not present in AudioMetrics JSON' + ) + if (accumulated := _dict.get('accumulated')) is not None: + args['accumulated'] = AudioMetricsDetails.from_dict(accumulated) + else: + raise ValueError( + 'Required property \'accumulated\' not present in AudioMetrics JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AudioMetrics object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'sampling_interval') and self.sampling_interval is not None: + _dict['sampling_interval'] = self.sampling_interval + if hasattr(self, 'accumulated') and self.accumulated is not None: + if isinstance(self.accumulated, dict): + _dict['accumulated'] = self.accumulated + else: + _dict['accumulated'] = self.accumulated.to_dict() + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AudioMetrics object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AudioMetrics') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AudioMetrics') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AudioMetricsDetails: + """ + Detailed information about the signal characteristics of the input audio. 
+ + :param bool final: If `true`, indicates the end of the audio stream, meaning + that transcription is complete. Currently, the field is always `true`. The + service returns metrics just once per audio stream. The results provide + aggregated audio metrics that pertain to the complete audio stream. + :param float end_time: The end time in seconds of the block of audio to which + the metrics apply. + :param float signal_to_noise_ratio: (optional) The signal-to-noise ratio (SNR) + for the audio signal. The value indicates the ratio of speech to noise in the + audio. A valid value lies in the range of 0 to 100 decibels (dB). The service + omits the field if it cannot compute the SNR for the audio. + :param float speech_ratio: The ratio of speech to non-speech segments in the + audio signal. The value lies in the range of 0.0 to 1.0. + :param float high_frequency_loss: The probability that the audio signal is + missing the upper half of its frequency content. + * A value close to 1.0 typically indicates artificially up-sampled audio, which + negatively impacts the accuracy of the transcription results. + * A value at or near 0.0 indicates that the audio signal is good and has a full + spectrum. + * A value around 0.5 means that detection of the frequency content is unreliable + or not available. + :param List[AudioMetricsHistogramBin] direct_current_offset: An array of + `AudioMetricsHistogramBin` objects that defines a histogram of the cumulative + direct current (DC) component of the audio signal. + :param List[AudioMetricsHistogramBin] clipping_rate: An array of + `AudioMetricsHistogramBin` objects that defines a histogram of the clipping rate + for the audio segments. The clipping rate is defined as the fraction of samples + in the segment that reach the maximum or minimum value that is offered by the + audio quantization range. The service auto-detects either a 16-bit Pulse-Code + Modulation(PCM) audio range (-32768 to +32767) or a unit range (-1.0 to +1.0). + The clipping rate is between 0.0 and 1.0, with higher values indicating possible + degradation of speech recognition. + :param List[AudioMetricsHistogramBin] speech_level: An array of + `AudioMetricsHistogramBin` objects that defines a histogram of the signal level + in segments of the audio that contain speech. The signal level is computed as + the Root-Mean-Square (RMS) value in a decibel (dB) scale normalized to the range + 0.0 (minimum level) to 1.0 (maximum level). + :param List[AudioMetricsHistogramBin] non_speech_level: An array of + `AudioMetricsHistogramBin` objects that defines a histogram of the signal level + in segments of the audio that do not contain speech. The signal level is + computed as the Root-Mean-Square (RMS) value in a decibel (dB) scale normalized + to the range 0.0 (minimum level) to 1.0 (maximum level). + """ + + def __init__( + self, + final: bool, + end_time: float, + speech_ratio: float, + high_frequency_loss: float, + direct_current_offset: List['AudioMetricsHistogramBin'], + clipping_rate: List['AudioMetricsHistogramBin'], + speech_level: List['AudioMetricsHistogramBin'], + non_speech_level: List['AudioMetricsHistogramBin'], + *, + signal_to_noise_ratio: Optional[float] = None, + ) -> None: + """ + Initialize a AudioMetricsDetails object. + + :param bool final: If `true`, indicates the end of the audio stream, + meaning that transcription is complete. Currently, the field is always + `true`. The service returns metrics just once per audio stream. 
The results + provide aggregated audio metrics that pertain to the complete audio stream. + :param float end_time: The end time in seconds of the block of audio to + which the metrics apply. + :param float speech_ratio: The ratio of speech to non-speech segments in + the audio signal. The value lies in the range of 0.0 to 1.0. + :param float high_frequency_loss: The probability that the audio signal is + missing the upper half of its frequency content. + * A value close to 1.0 typically indicates artificially up-sampled audio, + which negatively impacts the accuracy of the transcription results. + * A value at or near 0.0 indicates that the audio signal is good and has a + full spectrum. + * A value around 0.5 means that detection of the frequency content is + unreliable or not available. + :param List[AudioMetricsHistogramBin] direct_current_offset: An array of + `AudioMetricsHistogramBin` objects that defines a histogram of the + cumulative direct current (DC) component of the audio signal. + :param List[AudioMetricsHistogramBin] clipping_rate: An array of + `AudioMetricsHistogramBin` objects that defines a histogram of the clipping + rate for the audio segments. The clipping rate is defined as the fraction + of samples in the segment that reach the maximum or minimum value that is + offered by the audio quantization range. The service auto-detects either a + 16-bit Pulse-Code Modulation(PCM) audio range (-32768 to +32767) or a unit + range (-1.0 to +1.0). The clipping rate is between 0.0 and 1.0, with higher + values indicating possible degradation of speech recognition. + :param List[AudioMetricsHistogramBin] speech_level: An array of + `AudioMetricsHistogramBin` objects that defines a histogram of the signal + level in segments of the audio that contain speech. The signal level is + computed as the Root-Mean-Square (RMS) value in a decibel (dB) scale + normalized to the range 0.0 (minimum level) to 1.0 (maximum level). + :param List[AudioMetricsHistogramBin] non_speech_level: An array of + `AudioMetricsHistogramBin` objects that defines a histogram of the signal + level in segments of the audio that do not contain speech. The signal level + is computed as the Root-Mean-Square (RMS) value in a decibel (dB) scale + normalized to the range 0.0 (minimum level) to 1.0 (maximum level). + :param float signal_to_noise_ratio: (optional) The signal-to-noise ratio + (SNR) for the audio signal. The value indicates the ratio of speech to + noise in the audio. A valid value lies in the range of 0 to 100 decibels + (dB). The service omits the field if it cannot compute the SNR for the + audio. 
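As an illustration of how these histogram fields can be read (the 0.01 threshold is an arbitrary example chosen here, not a service recommendation), a helper might estimate how much of the audio shows clipping:

def fraction_clipped(details: AudioMetricsDetails, threshold: float = 0.01) -> float:
    # Sum the counts of all clipping-rate bins, then of the bins whose lower
    # boundary is at or above the chosen threshold.
    total = sum(b.count for b in details.clipping_rate)
    if total == 0:
        return 0.0
    clipped = sum(b.count for b in details.clipping_rate if b.begin >= threshold)
    return clipped / total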
+ """ + self.final = final + self.end_time = end_time + self.signal_to_noise_ratio = signal_to_noise_ratio + self.speech_ratio = speech_ratio + self.high_frequency_loss = high_frequency_loss + self.direct_current_offset = direct_current_offset + self.clipping_rate = clipping_rate + self.speech_level = speech_level + self.non_speech_level = non_speech_level + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AudioMetricsDetails': + """Initialize a AudioMetricsDetails object from a json dictionary.""" + args = {} + if (final := _dict.get('final')) is not None: + args['final'] = final + else: + raise ValueError( + 'Required property \'final\' not present in AudioMetricsDetails JSON' + ) + if (end_time := _dict.get('end_time')) is not None: + args['end_time'] = end_time + else: + raise ValueError( + 'Required property \'end_time\' not present in AudioMetricsDetails JSON' + ) + if (signal_to_noise_ratio := + _dict.get('signal_to_noise_ratio')) is not None: + args['signal_to_noise_ratio'] = signal_to_noise_ratio + if (speech_ratio := _dict.get('speech_ratio')) is not None: + args['speech_ratio'] = speech_ratio + else: + raise ValueError( + 'Required property \'speech_ratio\' not present in AudioMetricsDetails JSON' + ) + if (high_frequency_loss := + _dict.get('high_frequency_loss')) is not None: + args['high_frequency_loss'] = high_frequency_loss + else: + raise ValueError( + 'Required property \'high_frequency_loss\' not present in AudioMetricsDetails JSON' + ) + if (direct_current_offset := + _dict.get('direct_current_offset')) is not None: + args['direct_current_offset'] = [ + AudioMetricsHistogramBin.from_dict(v) + for v in direct_current_offset + ] + else: + raise ValueError( + 'Required property \'direct_current_offset\' not present in AudioMetricsDetails JSON' + ) + if (clipping_rate := _dict.get('clipping_rate')) is not None: + args['clipping_rate'] = [ + AudioMetricsHistogramBin.from_dict(v) for v in clipping_rate + ] + else: + raise ValueError( + 'Required property \'clipping_rate\' not present in AudioMetricsDetails JSON' + ) + if (speech_level := _dict.get('speech_level')) is not None: + args['speech_level'] = [ + AudioMetricsHistogramBin.from_dict(v) for v in speech_level + ] + else: + raise ValueError( + 'Required property \'speech_level\' not present in AudioMetricsDetails JSON' + ) + if (non_speech_level := _dict.get('non_speech_level')) is not None: + args['non_speech_level'] = [ + AudioMetricsHistogramBin.from_dict(v) for v in non_speech_level + ] + else: + raise ValueError( + 'Required property \'non_speech_level\' not present in AudioMetricsDetails JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AudioMetricsDetails object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'final') and self.final is not None: + _dict['final'] = self.final + if hasattr(self, 'end_time') and self.end_time is not None: + _dict['end_time'] = self.end_time + if hasattr(self, 'signal_to_noise_ratio' + ) and self.signal_to_noise_ratio is not None: + _dict['signal_to_noise_ratio'] = self.signal_to_noise_ratio + if hasattr(self, 'speech_ratio') and self.speech_ratio is not None: + _dict['speech_ratio'] = self.speech_ratio + if hasattr( + self, + 'high_frequency_loss') and self.high_frequency_loss is not None: + _dict['high_frequency_loss'] = self.high_frequency_loss + if hasattr(self, 'direct_current_offset' + ) and 
self.direct_current_offset is not None: + direct_current_offset_list = [] + for v in self.direct_current_offset: + if isinstance(v, dict): + direct_current_offset_list.append(v) + else: + direct_current_offset_list.append(v.to_dict()) + _dict['direct_current_offset'] = direct_current_offset_list + if hasattr(self, 'clipping_rate') and self.clipping_rate is not None: + clipping_rate_list = [] + for v in self.clipping_rate: + if isinstance(v, dict): + clipping_rate_list.append(v) + else: + clipping_rate_list.append(v.to_dict()) + _dict['clipping_rate'] = clipping_rate_list + if hasattr(self, 'speech_level') and self.speech_level is not None: + speech_level_list = [] + for v in self.speech_level: + if isinstance(v, dict): + speech_level_list.append(v) + else: + speech_level_list.append(v.to_dict()) + _dict['speech_level'] = speech_level_list + if hasattr(self, + 'non_speech_level') and self.non_speech_level is not None: + non_speech_level_list = [] + for v in self.non_speech_level: + if isinstance(v, dict): + non_speech_level_list.append(v) + else: + non_speech_level_list.append(v.to_dict()) + _dict['non_speech_level'] = non_speech_level_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AudioMetricsDetails object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AudioMetricsDetails') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AudioMetricsDetails') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AudioMetricsHistogramBin: + """ + A bin with defined boundaries that indicates the number of values in a range of signal + characteristics for a histogram. The first and last bins of a histogram are the + boundary bins. They cover the intervals between negative infinity and the first + boundary, and between the last boundary and positive infinity, respectively. + + :param float begin: The lower boundary of the bin in the histogram. + :param float end: The upper boundary of the bin in the histogram. + :param int count: The number of values in the bin of the histogram. + """ + + def __init__( + self, + begin: float, + end: float, + count: int, + ) -> None: + """ + Initialize a AudioMetricsHistogramBin object. + + :param float begin: The lower boundary of the bin in the histogram. + :param float end: The upper boundary of the bin in the histogram. + :param int count: The number of values in the bin of the histogram. 
+ """ + self.begin = begin + self.end = end + self.count = count + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AudioMetricsHistogramBin': + """Initialize a AudioMetricsHistogramBin object from a json dictionary.""" + args = {} + if (begin := _dict.get('begin')) is not None: + args['begin'] = begin + else: + raise ValueError( + 'Required property \'begin\' not present in AudioMetricsHistogramBin JSON' + ) + if (end := _dict.get('end')) is not None: + args['end'] = end + else: + raise ValueError( + 'Required property \'end\' not present in AudioMetricsHistogramBin JSON' + ) + if (count := _dict.get('count')) is not None: + args['count'] = count + else: + raise ValueError( + 'Required property \'count\' not present in AudioMetricsHistogramBin JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AudioMetricsHistogramBin object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'begin') and self.begin is not None: + _dict['begin'] = self.begin + if hasattr(self, 'end') and self.end is not None: + _dict['end'] = self.end + if hasattr(self, 'count') and self.count is not None: + _dict['count'] = self.count + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AudioMetricsHistogramBin object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AudioMetricsHistogramBin') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AudioMetricsHistogramBin') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AudioResource: + """ + Information about an audio resource from a custom acoustic model. + + :param int duration: The total seconds of audio in the audio resource. + :param str name: _For an archive-type resource_, the user-specified name of the + resource. + _For an audio-type resource_, the user-specified name of the resource or the + name of the audio file that the user added for the resource. The value depends + on the method that is called. + :param AudioDetails details: An `AudioDetails` object that provides detailed + information about the audio resource. The object is empty until the service + finishes processing the audio. + :param str status: The status of the audio resource: + * `ok`: The service successfully analyzed the audio data. The data can be used + to train the custom model. + * `being_processed`: The service is still analyzing the audio data. The service + cannot accept requests to add new audio resources or to train the custom model + until its analysis is complete. + * `invalid`: The audio data is not valid for training the custom model (possibly + because it has the wrong format or sampling rate, or because it is corrupted). + For an archive file, the entire archive is invalid if any of its audio files are + invalid. + """ + + def __init__( + self, + duration: int, + name: str, + details: 'AudioDetails', + status: str, + ) -> None: + """ + Initialize a AudioResource object. + + :param int duration: The total seconds of audio in the audio resource. 
+ :param str name: _For an archive-type resource_, the user-specified name of + the resource. + _For an audio-type resource_, the user-specified name of the resource or + the name of the audio file that the user added for the resource. The value + depends on the method that is called. + :param AudioDetails details: An `AudioDetails` object that provides + detailed information about the audio resource. The object is empty until + the service finishes processing the audio. + :param str status: The status of the audio resource: + * `ok`: The service successfully analyzed the audio data. The data can be + used to train the custom model. + * `being_processed`: The service is still analyzing the audio data. The + service cannot accept requests to add new audio resources or to train the + custom model until its analysis is complete. + * `invalid`: The audio data is not valid for training the custom model + (possibly because it has the wrong format or sampling rate, or because it + is corrupted). For an archive file, the entire archive is invalid if any of + its audio files are invalid. + """ + self.duration = duration + self.name = name + self.details = details + self.status = status + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AudioResource': + """Initialize a AudioResource object from a json dictionary.""" + args = {} + if (duration := _dict.get('duration')) is not None: + args['duration'] = duration + else: + raise ValueError( + 'Required property \'duration\' not present in AudioResource JSON' + ) + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in AudioResource JSON') + if (details := _dict.get('details')) is not None: + args['details'] = AudioDetails.from_dict(details) + else: + raise ValueError( + 'Required property \'details\' not present in AudioResource JSON' + ) + if (status := _dict.get('status')) is not None: + args['status'] = status + else: + raise ValueError( + 'Required property \'status\' not present in AudioResource JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AudioResource object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'duration') and self.duration is not None: + _dict['duration'] = self.duration + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'details') and self.details is not None: + if isinstance(self.details, dict): + _dict['details'] = self.details + else: + _dict['details'] = self.details.to_dict() + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AudioResource object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AudioResource') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AudioResource') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The status of the audio resource: + * `ok`: The service successfully analyzed the 
audio data. The data can be used to + train the custom model. + * `being_processed`: The service is still analyzing the audio data. The service + cannot accept requests to add new audio resources or to train the custom model + until its analysis is complete. + * `invalid`: The audio data is not valid for training the custom model (possibly + because it has the wrong format or sampling rate, or because it is corrupted). For + an archive file, the entire archive is invalid if any of its audio files are + invalid. + """ + + OK = 'ok' + BEING_PROCESSED = 'being_processed' + INVALID = 'invalid' + + +class AudioResources: + """ + Information about the audio resources from a custom acoustic model. + + :param float total_minutes_of_audio: The total minutes of accumulated audio + summed over all of the valid audio resources for the custom acoustic model. You + can use this value to determine whether the custom model has too little or too + much audio to begin training. + :param List[AudioResource] audio: An array of `AudioResource` objects that + provides information about the audio resources of the custom acoustic model. The + array is empty if the custom model has no audio resources. + """ + + def __init__( + self, + total_minutes_of_audio: float, + audio: List['AudioResource'], + ) -> None: + """ + Initialize a AudioResources object. + + :param float total_minutes_of_audio: The total minutes of accumulated audio + summed over all of the valid audio resources for the custom acoustic model. + You can use this value to determine whether the custom model has too little + or too much audio to begin training. + :param List[AudioResource] audio: An array of `AudioResource` objects that + provides information about the audio resources of the custom acoustic + model. The array is empty if the custom model has no audio resources. 
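A small sketch of a pre-training check built on this summary; the 10-minute figure is only an illustrative threshold chosen by the caller, not a limit taken from this code:

def ready_to_train(resources: AudioResources, minimum_minutes: float = 10.0) -> bool:
    # Require enough accumulated audio and every resource analyzed as `ok`.
    if resources.total_minutes_of_audio < minimum_minutes:
        return False
    return all(r.status == AudioResource.StatusEnum.OK for r in resources.audio)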
+ """ + self.total_minutes_of_audio = total_minutes_of_audio + self.audio = audio + + @classmethod + def from_dict(cls, _dict: Dict) -> 'AudioResources': + """Initialize a AudioResources object from a json dictionary.""" + args = {} + if (total_minutes_of_audio := + _dict.get('total_minutes_of_audio')) is not None: + args['total_minutes_of_audio'] = total_minutes_of_audio + else: + raise ValueError( + 'Required property \'total_minutes_of_audio\' not present in AudioResources JSON' + ) + if (audio := _dict.get('audio')) is not None: + args['audio'] = [AudioResource.from_dict(v) for v in audio] + else: + raise ValueError( + 'Required property \'audio\' not present in AudioResources JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AudioResources object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'total_minutes_of_audio' + ) and self.total_minutes_of_audio is not None: + _dict['total_minutes_of_audio'] = self.total_minutes_of_audio + if hasattr(self, 'audio') and self.audio is not None: + audio_list = [] + for v in self.audio: + if isinstance(v, dict): + audio_list.append(v) + else: + audio_list.append(v.to_dict()) + _dict['audio'] = audio_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this AudioResources object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'AudioResources') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'AudioResources') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Corpora: + """ + Information about the corpora from a custom language model. + + :param List[Corpus] corpora: An array of `Corpus` objects that provides + information about the corpora for the custom model. The array is empty if the + custom model has no corpora. + """ + + def __init__( + self, + corpora: List['Corpus'], + ) -> None: + """ + Initialize a Corpora object. + + :param List[Corpus] corpora: An array of `Corpus` objects that provides + information about the corpora for the custom model. The array is empty if + the custom model has no corpora. 
+ """ + self.corpora = corpora + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Corpora': + """Initialize a Corpora object from a json dictionary.""" + args = {} + if (corpora := _dict.get('corpora')) is not None: + args['corpora'] = [Corpus.from_dict(v) for v in corpora] + else: + raise ValueError( + 'Required property \'corpora\' not present in Corpora JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Corpora object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'corpora') and self.corpora is not None: + corpora_list = [] + for v in self.corpora: + if isinstance(v, dict): + corpora_list.append(v) + else: + corpora_list.append(v.to_dict()) + _dict['corpora'] = corpora_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Corpora object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Corpora') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Corpora') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Corpus: + """ + Information about a corpus from a custom language model. + + :param str name: The name of the corpus. + :param int total_words: The total number of words in the corpus. The value is + `0` while the corpus is being processed. + :param int out_of_vocabulary_words: _For custom models that are based on large + speech models and previous-generation models_, the number of OOV words extracted + from the corpus. The value is `0` while the corpus is being processed. + _For custom models that are based on next-generation models_, no OOV words are + extracted from corpora, so the value is always `0`. + :param str status: The status of the corpus: + * `analyzed`: The service successfully analyzed the corpus. The custom model can + be trained with data from the corpus. + * `being_processed`: The service is still analyzing the corpus. The service + cannot accept requests to add new resources or to train the custom model. + * `undetermined`: The service encountered an error while processing the corpus. + The `error` field describes the failure. + :param str error: (optional) If the status of the corpus is `undetermined`, the + following message: `Analysis of corpus 'name' failed. Please try adding the + corpus again by setting the 'allow_overwrite' flag to 'true'`. + """ + + def __init__( + self, + name: str, + total_words: int, + out_of_vocabulary_words: int, + status: str, + *, + error: Optional[str] = None, + ) -> None: + """ + Initialize a Corpus object. + + :param str name: The name of the corpus. + :param int total_words: The total number of words in the corpus. The value + is `0` while the corpus is being processed. + :param int out_of_vocabulary_words: _For custom models that are based on + large speech models and previous-generation models_, the number of OOV + words extracted from the corpus. The value is `0` while the corpus is being + processed. + _For custom models that are based on next-generation models_, no OOV words + are extracted from corpora, so the value is always `0`. 
+ :param str status: The status of the corpus: + * `analyzed`: The service successfully analyzed the corpus. The custom + model can be trained with data from the corpus. + * `being_processed`: The service is still analyzing the corpus. The service + cannot accept requests to add new resources or to train the custom model. + * `undetermined`: The service encountered an error while processing the + corpus. The `error` field describes the failure. + :param str error: (optional) If the status of the corpus is `undetermined`, + the following message: `Analysis of corpus 'name' failed. Please try adding + the corpus again by setting the 'allow_overwrite' flag to 'true'`. + """ + self.name = name + self.total_words = total_words + self.out_of_vocabulary_words = out_of_vocabulary_words + self.status = status + self.error = error + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Corpus': + """Initialize a Corpus object from a json dictionary.""" + args = {} + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in Corpus JSON') + if (total_words := _dict.get('total_words')) is not None: + args['total_words'] = total_words + else: + raise ValueError( + 'Required property \'total_words\' not present in Corpus JSON') + if (out_of_vocabulary_words := + _dict.get('out_of_vocabulary_words')) is not None: + args['out_of_vocabulary_words'] = out_of_vocabulary_words + else: + raise ValueError( + 'Required property \'out_of_vocabulary_words\' not present in Corpus JSON' + ) + if (status := _dict.get('status')) is not None: + args['status'] = status + else: + raise ValueError( + 'Required property \'status\' not present in Corpus JSON') + if (error := _dict.get('error')) is not None: + args['error'] = error + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Corpus object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'total_words') and self.total_words is not None: + _dict['total_words'] = self.total_words + if hasattr(self, 'out_of_vocabulary_words' + ) and self.out_of_vocabulary_words is not None: + _dict['out_of_vocabulary_words'] = self.out_of_vocabulary_words + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'error') and self.error is not None: + _dict['error'] = self.error + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Corpus object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Corpus') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Corpus') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class StatusEnum(str, Enum): + """ + The status of the corpus: + * `analyzed`: The service successfully analyzed the corpus. The custom model can + be trained with data from the corpus. + * `being_processed`: The service is still analyzing the corpus. The service cannot + accept requests to add new resources or to train the custom model. 
+ * `undetermined`: The service encountered an error while processing the corpus. + The `error` field describes the failure. + """ + + ANALYZED = 'analyzed' + BEING_PROCESSED = 'being_processed' + UNDETERMINED = 'undetermined' + + +class CustomWord: + """ + Information about a word that is to be added to a custom language model. + + :param str word: (optional) For the [Add custom words](#addwords) method, you + must specify the custom word that is to be added to or updated in the custom + model. Do not use characters that need to be URL-encoded, for example, spaces, + slashes, backslashes, colons, ampersands, double quotes, plus signs, equals + signs, or question marks. Use a `-` (dash) or `_` (underscore) to connect the + tokens of compound words. A Japanese custom word can include at most 25 + characters, not including leading or trailing spaces. + Omit this parameter for the [Add a custom word](#addword) method. + :param List[str] mapping_only: (optional) Parameter for custom words. You can + use the 'mapping_only' key in custom words as a form of post processing. This + key parameter has a boolean value to determine whether 'sounds_like' (for + non-Japanese models) or word (for Japanese) is not used for the model + fine-tuning, but as the replacement for 'display_as'. This feature helps you + when you use custom words exclusively to map 'sounds_like' (or word) to the + 'display_as' value, that is, when you use custom words solely for + post-processing purposes that do not need fine-tuning. + :param List[str] sounds_like: (optional) An array of sounds-like pronunciations + for the custom word. Specify how words that are difficult to pronounce, foreign + words, acronyms, and so on can be pronounced by users. + * _For custom models that are based on previous-generation models_, for a word + that is not in the service's base vocabulary, omit the parameter to have the + service automatically generate a sounds-like pronunciation for the word. + * For a word that is in the service's base vocabulary, use the parameter to + specify additional pronunciations for the word. You cannot override the default + pronunciation of a word; pronunciations you add augment the pronunciation from + the base vocabulary. + A word can have at most five sounds-like pronunciations. A pronunciation can + include at most 40 characters, not including leading or trailing spaces. A + Japanese pronunciation can include at most 25 characters, not including leading + or trailing spaces. + :param str display_as: (optional) An alternative spelling for the custom word + when it appears in a transcript. Use the parameter when you want the word to + have a spelling that is different from its usual representation or from its + spelling in corpora training data. + _For custom models that are based on next-generation models_, the service uses + the spelling of the word as the display-as value if you omit the field. + """ + + def __init__( + self, + *, + word: Optional[str] = None, + mapping_only: Optional[List[str]] = None, + sounds_like: Optional[List[str]] = None, + display_as: Optional[str] = None, + ) -> None: + """ + Initialize a CustomWord object. + + :param str word: (optional) For the [Add custom words](#addwords) method, + you must specify the custom word that is to be added to or updated in the + custom model. Do not use characters that need to be URL-encoded, for + example, spaces, slashes, backslashes, colons, ampersands, double quotes, + plus signs, equals signs, or question marks.
Use a `-` (dash) or `_` + (underscore) to connect the tokens of compound words. A Japanese custom + word can include at most 25 characters, not including leading or trailing + spaces. + Omit this parameter for the [Add a custom word](#addword) method. + :param List[str] mapping_only: (optional) Parameter for custom words. You + can use the 'mapping_only' key in custom words as a form of post + processing. This key parameter has a boolean value to determine whether + 'sounds_like' (for non-Japanese models) or word (for Japanese) is not used + for the model fine-tuning, but as the replacement for 'display_as'. This + feature helps you when you use custom words exclusively to map + 'sounds_like' (or word) to the 'display_as' value, that is, when you use + custom words solely for post-processing purposes that do not need + fine-tuning. + :param List[str] sounds_like: (optional) An array of sounds-like + pronunciations for the custom word. Specify how words that are difficult to + pronounce, foreign words, acronyms, and so on can be pronounced by users. + * _For custom models that are based on previous-generation models_, for a + word that is not in the service's base vocabulary, omit the parameter to + have the service automatically generate a sounds-like pronunciation for the + word. + * For a word that is in the service's base vocabulary, use the parameter to + specify additional pronunciations for the word. You cannot override the + default pronunciation of a word; pronunciations you add augment the + pronunciation from the base vocabulary. + A word can have at most five sounds-like pronunciations. A pronunciation + can include at most 40 characters, not including leading or trailing + spaces. A Japanese pronunciation can include at most 25 characters, not + including leading or trailing spaces. + :param str display_as: (optional) An alternative spelling for the custom + word when it appears in a transcript. Use the parameter when you want the + word to have a spelling that is different from its usual representation or + from its spelling in corpora training data. + _For custom models that are based on next-generation models_, the service + uses the spelling of the word as the display-as value if you omit the + field.
+ """ + self.word = word + self.mapping_only = mapping_only + self.sounds_like = sounds_like + self.display_as = display_as + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CustomWord': + """Initialize a CustomWord object from a json dictionary.""" + args = {} + if (word := _dict.get('word')) is not None: + args['word'] = word + if (mapping_only := _dict.get('mapping_only')) is not None: + args['mapping_only'] = mapping_only + if (sounds_like := _dict.get('sounds_like')) is not None: + args['sounds_like'] = sounds_like + if (display_as := _dict.get('display_as')) is not None: + args['display_as'] = display_as + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CustomWord object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'word') and self.word is not None: + _dict['word'] = self.word + if hasattr(self, 'mapping_only') and self.mapping_only is not None: + _dict['mapping_only'] = self.mapping_only + if hasattr(self, 'sounds_like') and self.sounds_like is not None: + _dict['sounds_like'] = self.sounds_like + if hasattr(self, 'display_as') and self.display_as is not None: + _dict['display_as'] = self.display_as + return _dict + def _to_dict(self): """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'duration') and self.duration is not None: - _dict['duration'] = self.duration - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'details') and self.details is not None: - _dict['details'] = self.details._to_dict() - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'container') and self.container is not None: - _dict['container'] = self.container._to_dict() - if hasattr(self, 'audio') and self.audio is not None: - _dict['audio'] = [x._to_dict() for x in self.audio] - return _dict + return self.to_dict() - def __str__(self): - """Return a `str` version of this AudioListing object.""" - return json.dumps(self._to_dict(), indent=2) + def __str__(self) -> str: + """Return a `str` version of this CustomWord object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'CustomWord') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'CustomWord') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class AudioResource(object): +class EnrichedResults: """ - AudioResource. - - :attr int duration: The total seconds of audio in the audio resource. - :attr str name: **For an archive-type resource,** the user-specified name of the - resource. - **For an audio-type resource,** the user-specified name of the resource or the name of - the audio file that the user added for the resource. The value depends on the method - that is called. - :attr AudioDetails details: An `AudioDetails` object that provides detailed - information about the audio resource. The object is empty until the service finishes - processing the audio. - :attr str status: The status of the audio resource: - * `ok`: The service successfully analyzed the audio data. The data can be used to - train the custom model. 
- * `being_processed`: The service is still analyzing the audio data. The service cannot - accept requests to add new audio resources or to train the custom model until its - analysis is complete. - * `invalid`: The audio data is not valid for training the custom model (possibly - because it has the wrong format or sampling rate, or because it is corrupted). For an - archive file, the entire archive is invalid if any of its audio files are invalid. + If enriched results are requested, transcription with inserted punctuation marks such + as periods, commas, question marks, and exclamation points. + + :param EnrichedResultsTranscript transcript: (optional) If enriched results are + requested, transcription with inserted punctuation marks such as periods, + commas, question marks, and exclamation points. + :param str status: (optional) The status of the enriched transcription. """ - def __init__(self, duration, name, details, status): + def __init__( + self, + *, + transcript: Optional['EnrichedResultsTranscript'] = None, + status: Optional[str] = None, + ) -> None: """ - Initialize a AudioResource object. + Initialize a EnrichedResults object. - :param int duration: The total seconds of audio in the audio resource. - :param str name: **For an archive-type resource,** the user-specified name of the - resource. - **For an audio-type resource,** the user-specified name of the resource or the - name of the audio file that the user added for the resource. The value depends on - the method that is called. - :param AudioDetails details: An `AudioDetails` object that provides detailed - information about the audio resource. The object is empty until the service - finishes processing the audio. - :param str status: The status of the audio resource: - * `ok`: The service successfully analyzed the audio data. The data can be used to - train the custom model. - * `being_processed`: The service is still analyzing the audio data. The service - cannot accept requests to add new audio resources or to train the custom model - until its analysis is complete. - * `invalid`: The audio data is not valid for training the custom model (possibly - because it has the wrong format or sampling rate, or because it is corrupted). For - an archive file, the entire archive is invalid if any of its audio files are - invalid. + :param EnrichedResultsTranscript transcript: (optional) If enriched results + are requested, transcription with inserted punctuation marks such as + periods, commas, question marks, and exclamation points. + :param str status: (optional) The status of the enriched transcription. 
""" - self.duration = duration - self.name = name - self.details = details + self.transcript = transcript self.status = status @classmethod - def _from_dict(cls, _dict): - """Initialize a AudioResource object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'EnrichedResults': + """Initialize a EnrichedResults object from a json dictionary.""" args = {} - if 'duration' in _dict: - args['duration'] = _dict.get('duration') - else: - raise ValueError( - 'Required property \'duration\' not present in AudioResource JSON' - ) - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in AudioResource JSON') - if 'details' in _dict: - args['details'] = AudioDetails._from_dict(_dict.get('details')) - else: - raise ValueError( - 'Required property \'details\' not present in AudioResource JSON' - ) - if 'status' in _dict: - args['status'] = _dict.get('status') - else: - raise ValueError( - 'Required property \'status\' not present in AudioResource JSON' - ) + if (transcript := _dict.get('transcript')) is not None: + args['transcript'] = EnrichedResultsTranscript.from_dict(transcript) + if (status := _dict.get('status')) is not None: + args['status'] = status return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a EnrichedResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'duration') and self.duration is not None: - _dict['duration'] = self.duration - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'details') and self.details is not None: - _dict['details'] = self.details._to_dict() + if hasattr(self, 'transcript') and self.transcript is not None: + if isinstance(self.transcript, dict): + _dict['transcript'] = self.transcript + else: + _dict['transcript'] = self.transcript.to_dict() if hasattr(self, 'status') and self.status is not None: _dict['status'] = self.status return _dict - def __str__(self): - """Return a `str` version of this AudioResource object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this EnrichedResults object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'EnrichedResults') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'EnrichedResults') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class AudioResources(object): +class EnrichedResultsTranscript: """ - AudioResources. - - :attr float total_minutes_of_audio: The total minutes of accumulated audio summed over - all of the valid audio resources for the custom acoustic model. You can use this value - to determine whether the custom model has too little or too much audio to begin - training. - :attr list[AudioResource] audio: An array of `AudioResource` objects that provides - information about the audio resources of the custom acoustic model. The array is empty - if the custom model has no audio resources. 
+ If enriched results are requested, transcription with inserted punctuation marks such + as periods, commas, question marks, and exclamation points. + + :param str text: (optional) The transcript text. + :param EnrichedResultsTranscriptTimestamp timestamp: (optional) The speaking + time from the beginning of the transcript to the end. """ - def __init__(self, total_minutes_of_audio, audio): + def __init__( + self, + *, + text: Optional[str] = None, + timestamp: Optional['EnrichedResultsTranscriptTimestamp'] = None, + ) -> None: """ - Initialize a AudioResources object. + Initialize a EnrichedResultsTranscript object. - :param float total_minutes_of_audio: The total minutes of accumulated audio summed - over all of the valid audio resources for the custom acoustic model. You can use - this value to determine whether the custom model has too little or too much audio - to begin training. - :param list[AudioResource] audio: An array of `AudioResource` objects that - provides information about the audio resources of the custom acoustic model. The - array is empty if the custom model has no audio resources. + :param str text: (optional) The transcript text. + :param EnrichedResultsTranscriptTimestamp timestamp: (optional) The + speaking time from the beginning of the transcript to the end. """ - self.total_minutes_of_audio = total_minutes_of_audio - self.audio = audio + self.text = text + self.timestamp = timestamp @classmethod - def _from_dict(cls, _dict): - """Initialize a AudioResources object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'EnrichedResultsTranscript': + """Initialize a EnrichedResultsTranscript object from a json dictionary.""" args = {} - if 'total_minutes_of_audio' in _dict: - args['total_minutes_of_audio'] = _dict.get('total_minutes_of_audio') - else: - raise ValueError( - 'Required property \'total_minutes_of_audio\' not present in AudioResources JSON' - ) - if 'audio' in _dict: - args['audio'] = [ - AudioResource._from_dict(x) for x in (_dict.get('audio')) - ] - else: - raise ValueError( - 'Required property \'audio\' not present in AudioResources JSON' - ) + if (text := _dict.get('text')) is not None: + args['text'] = text + if (timestamp := _dict.get('timestamp')) is not None: + args['timestamp'] = EnrichedResultsTranscriptTimestamp.from_dict( + timestamp) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a EnrichedResultsTranscript object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'total_minutes_of_audio' - ) and self.total_minutes_of_audio is not None: - _dict['total_minutes_of_audio'] = self.total_minutes_of_audio - if hasattr(self, 'audio') and self.audio is not None: - _dict['audio'] = [x._to_dict() for x in self.audio] + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'timestamp') and self.timestamp is not None: + if isinstance(self.timestamp, dict): + _dict['timestamp'] = self.timestamp + else: + _dict['timestamp'] = self.timestamp.to_dict() return _dict - def __str__(self): - """Return a `str` version of this AudioResources object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this EnrichedResultsTranscript object.""" + return 
json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'EnrichedResultsTranscript') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'EnrichedResultsTranscript') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Corpora(object): +class EnrichedResultsTranscriptTimestamp: """ - Corpora. + The speaking time from the beginning of the transcript to the end. - :attr list[Corpus] corpora: An array of `Corpus` objects that provides information - about the corpora for the custom model. The array is empty if the custom model has no - corpora. + :param float from_: (optional) The start time of a word from the transcript. The + value matches the start time of a word from the `timestamps` array. + :param float to: (optional) The end time of a word from the transcript. The + value matches the end time of a word from the `timestamps` array. """ - def __init__(self, corpora): + def __init__( + self, + *, + from_: Optional[float] = None, + to: Optional[float] = None, + ) -> None: """ - Initialize a Corpora object. + Initialize a EnrichedResultsTranscriptTimestamp object. - :param list[Corpus] corpora: An array of `Corpus` objects that provides - information about the corpora for the custom model. The array is empty if the - custom model has no corpora. + :param float from_: (optional) The start time of a word from the + transcript. The value matches the start time of a word from the + `timestamps` array. + :param float to: (optional) The end time of a word from the transcript. The + value matches the end time of a word from the `timestamps` array. 
""" - self.corpora = corpora + self.from_ = from_ + self.to = to @classmethod - def _from_dict(cls, _dict): - """Initialize a Corpora object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'EnrichedResultsTranscriptTimestamp': + """Initialize a EnrichedResultsTranscriptTimestamp object from a json dictionary.""" args = {} - if 'corpora' in _dict: - args['corpora'] = [ - Corpus._from_dict(x) for x in (_dict.get('corpora')) - ] - else: - raise ValueError( - 'Required property \'corpora\' not present in Corpora JSON') + if (from_ := _dict.get('from')) is not None: + args['from_'] = from_ + if (to := _dict.get('to')) is not None: + args['to'] = to return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a EnrichedResultsTranscriptTimestamp object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'corpora') and self.corpora is not None: - _dict['corpora'] = [x._to_dict() for x in self.corpora] + if hasattr(self, 'from_') and self.from_ is not None: + _dict['from'] = self.from_ + if hasattr(self, 'to') and self.to is not None: + _dict['to'] = self.to return _dict - def __str__(self): - """Return a `str` version of this Corpora object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this EnrichedResultsTranscriptTimestamp object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'EnrichedResultsTranscriptTimestamp') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'EnrichedResultsTranscriptTimestamp') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Corpus(object): +class Grammar: """ - Corpus. - - :attr str name: The name of the corpus. - :attr int total_words: The total number of words in the corpus. The value is `0` while - the corpus is being processed. - :attr int out_of_vocabulary_words: The number of OOV words in the corpus. The value is - `0` while the corpus is being processed. - :attr str status: The status of the corpus: - * `analyzed`: The service successfully analyzed the corpus. The custom model can be - trained with data from the corpus. - * `being_processed`: The service is still analyzing the corpus. The service cannot - accept requests to add new resources or to train the custom model. - * `undetermined`: The service encountered an error while processing the corpus. The - `error` field describes the failure. - :attr str error: (optional) If the status of the corpus is `undetermined`, the - following message: `Analysis of corpus 'name' failed. Please try adding the corpus - again by setting the 'allow_overwrite' flag to 'true'`. + Information about a grammar from a custom language model. + + :param str name: The name of the grammar. + :param int out_of_vocabulary_words: _For custom models that are based on + previous-generation models_, the number of OOV words extracted from the grammar. + The value is `0` while the grammar is being processed. 
+ _For custom models that are based on next-generation models_, no OOV words are + extracted from grammars, so the value is always `0`. + :param str status: The status of the grammar: + * `analyzed`: The service successfully analyzed the grammar. The custom model + can be trained with data from the grammar. + * `being_processed`: The service is still analyzing the grammar. The service + cannot accept requests to add new resources or to train the custom model. + * `undetermined`: The service encountered an error while processing the grammar. + The `error` field describes the failure. + :param str error: (optional) If the status of the grammar is `undetermined`, the + following message: `Analysis of grammar '{grammar_name}' failed. Please try + fixing the error or adding the grammar again by setting the 'allow_overwrite' + flag to 'true'.`. """ - def __init__(self, - name, - total_words, - out_of_vocabulary_words, - status, - error=None): + def __init__( + self, + name: str, + out_of_vocabulary_words: int, + status: str, + *, + error: Optional[str] = None, + ) -> None: """ - Initialize a Corpus object. + Initialize a Grammar object. - :param str name: The name of the corpus. - :param int total_words: The total number of words in the corpus. The value is `0` - while the corpus is being processed. - :param int out_of_vocabulary_words: The number of OOV words in the corpus. The - value is `0` while the corpus is being processed. - :param str status: The status of the corpus: - * `analyzed`: The service successfully analyzed the corpus. The custom model can - be trained with data from the corpus. - * `being_processed`: The service is still analyzing the corpus. The service cannot - accept requests to add new resources or to train the custom model. - * `undetermined`: The service encountered an error while processing the corpus. - The `error` field describes the failure. - :param str error: (optional) If the status of the corpus is `undetermined`, the - following message: `Analysis of corpus 'name' failed. Please try adding the corpus - again by setting the 'allow_overwrite' flag to 'true'`. + :param str name: The name of the grammar. + :param int out_of_vocabulary_words: _For custom models that are based on + previous-generation models_, the number of OOV words extracted from the + grammar. The value is `0` while the grammar is being processed. + _For custom models that are based on next-generation models_, no OOV words + are extracted from grammars, so the value is always `0`. + :param str status: The status of the grammar: + * `analyzed`: The service successfully analyzed the grammar. The custom + model can be trained with data from the grammar. + * `being_processed`: The service is still analyzing the grammar. The + service cannot accept requests to add new resources or to train the custom + model. + * `undetermined`: The service encountered an error while processing the + grammar. The `error` field describes the failure. + :param str error: (optional) If the status of the grammar is + `undetermined`, the following message: `Analysis of grammar + '{grammar_name}' failed. Please try fixing the error or adding the grammar + again by setting the 'allow_overwrite' flag to 'true'.`. 
""" self.name = name - self.total_words = total_words self.out_of_vocabulary_words = out_of_vocabulary_words self.status = status self.error = error @classmethod - def _from_dict(cls, _dict): - """Initialize a Corpus object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'Grammar': + """Initialize a Grammar object from a json dictionary.""" args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in Corpus JSON') - if 'total_words' in _dict: - args['total_words'] = _dict.get('total_words') + if (name := _dict.get('name')) is not None: + args['name'] = name else: raise ValueError( - 'Required property \'total_words\' not present in Corpus JSON') - if 'out_of_vocabulary_words' in _dict: - args['out_of_vocabulary_words'] = _dict.get( - 'out_of_vocabulary_words') + 'Required property \'name\' not present in Grammar JSON') + if (out_of_vocabulary_words := + _dict.get('out_of_vocabulary_words')) is not None: + args['out_of_vocabulary_words'] = out_of_vocabulary_words else: raise ValueError( - 'Required property \'out_of_vocabulary_words\' not present in Corpus JSON' + 'Required property \'out_of_vocabulary_words\' not present in Grammar JSON' ) - if 'status' in _dict: - args['status'] = _dict.get('status') + if (status := _dict.get('status')) is not None: + args['status'] = status else: raise ValueError( - 'Required property \'status\' not present in Corpus JSON') - if 'error' in _dict: - args['error'] = _dict.get('error') + 'Required property \'status\' not present in Grammar JSON') + if (error := _dict.get('error')) is not None: + args['error'] = error return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Grammar object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'name') and self.name is not None: _dict['name'] = self.name - if hasattr(self, 'total_words') and self.total_words is not None: - _dict['total_words'] = self.total_words if hasattr(self, 'out_of_vocabulary_words' ) and self.out_of_vocabulary_words is not None: _dict['out_of_vocabulary_words'] = self.out_of_vocabulary_words @@ -3607,466 +7045,602 @@ def _to_dict(self): _dict['error'] = self.error return _dict - def __str__(self): - """Return a `str` version of this Corpus object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Grammar object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Grammar') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Grammar') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class StatusEnum(str, Enum): + """ + The status of the grammar: + * `analyzed`: The service successfully analyzed the grammar. The custom model can + be trained with data from the grammar. + * `being_processed`: The service is still analyzing the grammar. The service + cannot accept requests to add new resources or to train the custom model. 
+ * `undetermined`: The service encountered an error while processing the grammar. + The `error` field describes the failure. + """ + + ANALYZED = 'analyzed' + BEING_PROCESSED = 'being_processed' + UNDETERMINED = 'undetermined' + -class CustomWord(object): +class Grammars: """ - CustomWord. - - :attr str word: (optional) For the **Add custom words** method, you must specify the - custom word that is to be added to or updated in the custom model. Do not include - spaces in the word. Use a `-` (dash) or `_` (underscore) to connect the tokens of - compound words. - Omit this parameter for the **Add a custom word** method. - :attr list[str] sounds_like: (optional) An array of sounds-like pronunciations for the - custom word. Specify how words that are difficult to pronounce, foreign words, - acronyms, and so on can be pronounced by users. - * For a word that is not in the service's base vocabulary, omit the parameter to have - the service automatically generate a sounds-like pronunciation for the word. - * For a word that is in the service's base vocabulary, use the parameter to specify - additional pronunciations for the word. You cannot override the default pronunciation - of a word; pronunciations you add augment the pronunciation from the base vocabulary. - A word can have at most five sounds-like pronunciations. A pronunciation can include - at most 40 characters not including spaces. - :attr str display_as: (optional) An alternative spelling for the custom word when it - appears in a transcript. Use the parameter when you want the word to have a spelling - that is different from its usual representation or from its spelling in corpora - training data. + Information about the grammars from a custom language model. + + :param List[Grammar] grammars: An array of `Grammar` objects that provides + information about the grammars for the custom model. The array is empty if the + custom model has no grammars. """ - def __init__(self, word=None, sounds_like=None, display_as=None): + def __init__( + self, + grammars: List['Grammar'], + ) -> None: """ - Initialize a CustomWord object. + Initialize a Grammars object. - :param str word: (optional) For the **Add custom words** method, you must specify - the custom word that is to be added to or updated in the custom model. Do not - include spaces in the word. Use a `-` (dash) or `_` (underscore) to connect the - tokens of compound words. - Omit this parameter for the **Add a custom word** method. - :param list[str] sounds_like: (optional) An array of sounds-like pronunciations - for the custom word. Specify how words that are difficult to pronounce, foreign - words, acronyms, and so on can be pronounced by users. - * For a word that is not in the service's base vocabulary, omit the parameter to - have the service automatically generate a sounds-like pronunciation for the word. - * For a word that is in the service's base vocabulary, use the parameter to - specify additional pronunciations for the word. You cannot override the default - pronunciation of a word; pronunciations you add augment the pronunciation from the - base vocabulary. - A word can have at most five sounds-like pronunciations. A pronunciation can - include at most 40 characters not including spaces. - :param str display_as: (optional) An alternative spelling for the custom word when - it appears in a transcript. Use the parameter when you want the word to have a - spelling that is different from its usual representation or from its spelling in - corpora training data. 
+ :param List[Grammar] grammars: An array of `Grammar` objects that provides + information about the grammars for the custom model. The array is empty if + the custom model has no grammars. """ - self.word = word - self.sounds_like = sounds_like - self.display_as = display_as + self.grammars = grammars @classmethod - def _from_dict(cls, _dict): - """Initialize a CustomWord object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'Grammars': + """Initialize a Grammars object from a json dictionary.""" args = {} - if 'word' in _dict: - args['word'] = _dict.get('word') - if 'sounds_like' in _dict: - args['sounds_like'] = _dict.get('sounds_like') - if 'display_as' in _dict: - args['display_as'] = _dict.get('display_as') + if (grammars := _dict.get('grammars')) is not None: + args['grammars'] = [Grammar.from_dict(v) for v in grammars] + else: + raise ValueError( + 'Required property \'grammars\' not present in Grammars JSON') return cls(**args) + @classmethod + def _from_dict(cls, _dict): + """Initialize a Grammars object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'grammars') and self.grammars is not None: + grammars_list = [] + for v in self.grammars: + if isinstance(v, dict): + grammars_list.append(v) + else: + grammars_list.append(v.to_dict()) + _dict['grammars'] = grammars_list + return _dict + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Grammars object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Grammars') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Grammars') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class KeywordResult: + """ + Information about a match for a keyword from speech recognition results. + + :param str normalized_text: A specified keyword normalized to the spoken phrase + that matched in the audio input. + :param float start_time: The start time in seconds of the keyword match. + :param float end_time: The end time in seconds of the keyword match. + :param float confidence: A confidence score for the keyword match in the range + of 0.0 to 1.0. + """ + + def __init__( + self, + normalized_text: str, + start_time: float, + end_time: float, + confidence: float, + ) -> None: + """ + Initialize a KeywordResult object. + + :param str normalized_text: A specified keyword normalized to the spoken + phrase that matched in the audio input. + :param float start_time: The start time in seconds of the keyword match. + :param float end_time: The end time in seconds of the keyword match. + :param float confidence: A confidence score for the keyword match in the + range of 0.0 to 1.0. 
+ """ + self.normalized_text = normalized_text + self.start_time = start_time + self.end_time = end_time + self.confidence = confidence + + @classmethod + def from_dict(cls, _dict: Dict) -> 'KeywordResult': + """Initialize a KeywordResult object from a json dictionary.""" + args = {} + if (normalized_text := _dict.get('normalized_text')) is not None: + args['normalized_text'] = normalized_text + else: + raise ValueError( + 'Required property \'normalized_text\' not present in KeywordResult JSON' + ) + if (start_time := _dict.get('start_time')) is not None: + args['start_time'] = start_time + else: + raise ValueError( + 'Required property \'start_time\' not present in KeywordResult JSON' + ) + if (end_time := _dict.get('end_time')) is not None: + args['end_time'] = end_time + else: + raise ValueError( + 'Required property \'end_time\' not present in KeywordResult JSON' + ) + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + else: + raise ValueError( + 'Required property \'confidence\' not present in KeywordResult JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a KeywordResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'word') and self.word is not None: - _dict['word'] = self.word - if hasattr(self, 'sounds_like') and self.sounds_like is not None: - _dict['sounds_like'] = self.sounds_like - if hasattr(self, 'display_as') and self.display_as is not None: - _dict['display_as'] = self.display_as + if hasattr(self, + 'normalized_text') and self.normalized_text is not None: + _dict['normalized_text'] = self.normalized_text + if hasattr(self, 'start_time') and self.start_time is not None: + _dict['start_time'] = self.start_time + if hasattr(self, 'end_time') and self.end_time is not None: + _dict['end_time'] = self.end_time + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence return _dict - def __str__(self): - """Return a `str` version of this CustomWord object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this KeywordResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'KeywordResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'KeywordResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Grammar(object): +class LanguageDetectionResult: """ - Grammar. - - :attr str name: The name of the grammar. - :attr int out_of_vocabulary_words: The number of OOV words in the grammar. The value - is `0` while the grammar is being processed. - :attr str status: The status of the grammar: - * `analyzed`: The service successfully analyzed the grammar. The custom model can be - trained with data from the grammar. - * `being_processed`: The service is still analyzing the grammar. The service cannot - accept requests to add new resources or to train the custom model. - * `undetermined`: The service encountered an error while processing the grammar. 
The - `error` field describes the failure. - :attr str error: (optional) If the status of the grammar is `undetermined`, the - following message: `Analysis of grammar '{grammar_name}' failed. Please try fixing the - error or adding the grammar again by setting the 'allow_overwrite' flag to 'true'.`. + Language detection results. + + :param List[LanguageInfo] language_info: (optional) An array of `LanguageInfo` + objects. """ - def __init__(self, name, out_of_vocabulary_words, status, error=None): + def __init__( + self, + *, + language_info: Optional[List['LanguageInfo']] = None, + ) -> None: """ - Initialize a Grammar object. + Initialize a LanguageDetectionResult object. - :param str name: The name of the grammar. - :param int out_of_vocabulary_words: The number of OOV words in the grammar. The - value is `0` while the grammar is being processed. - :param str status: The status of the grammar: - * `analyzed`: The service successfully analyzed the grammar. The custom model can - be trained with data from the grammar. - * `being_processed`: The service is still analyzing the grammar. The service - cannot accept requests to add new resources or to train the custom model. - * `undetermined`: The service encountered an error while processing the grammar. - The `error` field describes the failure. - :param str error: (optional) If the status of the grammar is `undetermined`, the - following message: `Analysis of grammar '{grammar_name}' failed. Please try fixing - the error or adding the grammar again by setting the 'allow_overwrite' flag to - 'true'.`. + :param List[LanguageInfo] language_info: (optional) An array of + `LanguageInfo` objects. """ - self.name = name - self.out_of_vocabulary_words = out_of_vocabulary_words - self.status = status - self.error = error + self.language_info = language_info @classmethod - def _from_dict(cls, _dict): - """Initialize a Grammar object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'LanguageDetectionResult': + """Initialize a LanguageDetectionResult object from a json dictionary.""" args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in Grammar JSON') - if 'out_of_vocabulary_words' in _dict: - args['out_of_vocabulary_words'] = _dict.get( - 'out_of_vocabulary_words') - else: - raise ValueError( - 'Required property \'out_of_vocabulary_words\' not present in Grammar JSON' - ) - if 'status' in _dict: - args['status'] = _dict.get('status') - else: - raise ValueError( - 'Required property \'status\' not present in Grammar JSON') - if 'error' in _dict: - args['error'] = _dict.get('error') + if (language_info := _dict.get('language_info')) is not None: + args['language_info'] = [ + LanguageInfo.from_dict(v) for v in language_info + ] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a LanguageDetectionResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'out_of_vocabulary_words' - ) and self.out_of_vocabulary_words is not None: - _dict['out_of_vocabulary_words'] = self.out_of_vocabulary_words - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, 'error') and self.error is not None: - _dict['error'] = self.error + if hasattr(self, 
'language_info') and self.language_info is not None: + language_info_list = [] + for v in self.language_info: + if isinstance(v, dict): + language_info_list.append(v) + else: + language_info_list.append(v.to_dict()) + _dict['language_info'] = language_info_list return _dict - def __str__(self): - """Return a `str` version of this Grammar object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this LanguageDetectionResult object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LanguageDetectionResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'LanguageDetectionResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Grammars(object): +class LanguageDetectionResults: """ - Grammars. - - :attr list[Grammar] grammars: An array of `Grammar` objects that provides information - about the grammars for the custom model. The array is empty if the custom model has no - grammars. + Language detection results. + + :param List[LanguageDetectionResult] results: (optional) An array of + `LanguageDetectionResult` objects. + :param int result_index: (optional) An index that indicates a change point in + the `results` array. The service increments the index for additional results + that it sends for new audio for the same request. All results with the same + index are delivered at the same time. The same index can include multiple final + results that are delivered with the same response. """ - def __init__(self, grammars): - """ - Initialize a Grammars object. - - :param list[Grammar] grammars: An array of `Grammar` objects that provides - information about the grammars for the custom model. The array is empty if the - custom model has no grammars. + def __init__( + self, + *, + results: Optional[List['LanguageDetectionResult']] = None, + result_index: Optional[int] = None, + ) -> None: + """ + Initialize a LanguageDetectionResults object. + + :param List[LanguageDetectionResult] results: (optional) An array of + `LanguageDetectionResult` objects. + :param int result_index: (optional) An index that indicates a change point + in the `results` array. The service increments the index for additional + results that it sends for new audio for the same request. All results with + the same index are delivered at the same time. The same index can include + multiple final results that are delivered with the same response. 
""" - self.grammars = grammars + self.results = results + self.result_index = result_index @classmethod - def _from_dict(cls, _dict): - """Initialize a Grammars object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'LanguageDetectionResults': + """Initialize a LanguageDetectionResults object from a json dictionary.""" args = {} - if 'grammars' in _dict: - args['grammars'] = [ - Grammar._from_dict(x) for x in (_dict.get('grammars')) + if (results := _dict.get('results')) is not None: + args['results'] = [ + LanguageDetectionResult.from_dict(v) for v in results ] - else: - raise ValueError( - 'Required property \'grammars\' not present in Grammars JSON') + if (result_index := _dict.get('result_index')) is not None: + args['result_index'] = result_index return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a LanguageDetectionResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'grammars') and self.grammars is not None: - _dict['grammars'] = [x._to_dict() for x in self.grammars] + if hasattr(self, 'results') and self.results is not None: + results_list = [] + for v in self.results: + if isinstance(v, dict): + results_list.append(v) + else: + results_list.append(v.to_dict()) + _dict['results'] = results_list + if hasattr(self, 'result_index') and self.result_index is not None: + _dict['result_index'] = self.result_index return _dict - def __str__(self): - """Return a `str` version of this Grammars object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() - def __eq__(self, other): + def __str__(self) -> str: + """Return a `str` version of this LanguageDetectionResults object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LanguageDetectionResults') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'LanguageDetectionResults') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class KeywordResult(object): +class LanguageInfo: """ - KeywordResult. - - :attr str normalized_text: A specified keyword normalized to the spoken phrase that - matched in the audio input. - :attr float start_time: The start time in seconds of the keyword match. - :attr float end_time: The end time in seconds of the keyword match. - :attr float confidence: A confidence score for the keyword match in the range of 0.0 - to 1.0. + Language detection info such as confidence and language detected. + + :param float confidence: (optional) A score that indicates the service's + confidence in its identification of the language in the range of 0.0 to 1.0. + :param str language: (optional) The language detected in standard abbreviated + ISO 639 format. + :param float timestamp: (optional) The timestamp of the detected language. """ - def __init__(self, normalized_text, start_time, end_time, confidence): + def __init__( + self, + *, + confidence: Optional[float] = None, + language: Optional[str] = None, + timestamp: Optional[float] = None, + ) -> None: """ - Initialize a KeywordResult object. + Initialize a LanguageInfo object. 
- :param str normalized_text: A specified keyword normalized to the spoken phrase - that matched in the audio input. - :param float start_time: The start time in seconds of the keyword match. - :param float end_time: The end time in seconds of the keyword match. - :param float confidence: A confidence score for the keyword match in the range of - 0.0 to 1.0. + :param float confidence: (optional) A score that indicates the service's + confidence in its identification of the language in the range of 0.0 to + 1.0. + :param str language: (optional) The language detected in standard + abbreviated ISO 639 format. + :param float timestamp: (optional) The timestamp of the detected language. """ - self.normalized_text = normalized_text - self.start_time = start_time - self.end_time = end_time self.confidence = confidence + self.language = language + self.timestamp = timestamp @classmethod - def _from_dict(cls, _dict): - """Initialize a KeywordResult object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'LanguageInfo': + """Initialize a LanguageInfo object from a json dictionary.""" args = {} - if 'normalized_text' in _dict: - args['normalized_text'] = _dict.get('normalized_text') - else: - raise ValueError( - 'Required property \'normalized_text\' not present in KeywordResult JSON' - ) - if 'start_time' in _dict: - args['start_time'] = _dict.get('start_time') - else: - raise ValueError( - 'Required property \'start_time\' not present in KeywordResult JSON' - ) - if 'end_time' in _dict: - args['end_time'] = _dict.get('end_time') - else: - raise ValueError( - 'Required property \'end_time\' not present in KeywordResult JSON' - ) - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') - else: - raise ValueError( - 'Required property \'confidence\' not present in KeywordResult JSON' - ) + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + if (language := _dict.get('language')) is not None: + args['language'] = language + if (timestamp := _dict.get('timestamp')) is not None: + args['timestamp'] = timestamp return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a LanguageInfo object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, - 'normalized_text') and self.normalized_text is not None: - _dict['normalized_text'] = self.normalized_text - if hasattr(self, 'start_time') and self.start_time is not None: - _dict['start_time'] = self.start_time - if hasattr(self, 'end_time') and self.end_time is not None: - _dict['end_time'] = self.end_time if hasattr(self, 'confidence') and self.confidence is not None: _dict['confidence'] = self.confidence + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'timestamp') and self.timestamp is not None: + _dict['timestamp'] = self.timestamp return _dict - def __str__(self): - """Return a `str` version of this KeywordResult object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this LanguageInfo object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'LanguageInfo') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not 
isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'LanguageInfo') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class LanguageModel(object): +class LanguageModel: """ - LanguageModel. - - :attr str customization_id: The customization ID (GUID) of the custom language model. - The **Create a custom language model** method returns only this field of the object; - it does not return the other fields. - :attr str created: (optional) The date and time in Coordinated Universal Time (UTC) at - which the custom language model was created. The value is provided in full ISO 8601 - format (`YYYY-MM-DDThh:mm:ss.sTZD`). - :attr str language: (optional) The language identifier of the custom language model - (for example, `en-US`). - :attr str dialect: (optional) The dialect of the language for the custom language - model. By default, the dialect matches the language of the base model; for example, - `en-US` for either of the US English language models. For Spanish models, the field - indicates the dialect for which the model was created: - * `es-ES` for Castilian Spanish (the default) - * `es-LA` for Latin American Spanish - * `es-US` for North American (Mexican) Spanish. - :attr list[str] versions: (optional) A list of the available versions of the custom - language model. Each element of the array indicates a version of the base model with - which the custom model can be used. Multiple versions exist only if the custom model - has been upgraded; otherwise, only a single version is shown. - :attr str owner: (optional) The GUID of the credentials for the instance of the - service that owns the custom language model. - :attr str name: (optional) The name of the custom language model. - :attr str description: (optional) The description of the custom language model. - :attr str base_model_name: (optional) The name of the language model for which the - custom language model was created. - :attr str status: (optional) The current status of the custom language model: - * `pending`: The model was created but is waiting either for training data to be added - or for the service to finish analyzing added data. - * `ready`: The model contains data and is ready to be trained. - * `training`: The model is currently being trained. - * `available`: The model is trained and ready to use. - * `upgrading`: The model is currently being upgraded. - * `failed`: Training of the model failed. - :attr int progress: (optional) A percentage that indicates the progress of the custom - language model's current training. A value of `100` means that the model is fully - trained. **Note:** The `progress` field does not currently reflect the progress of the - training. The field changes from `0` to `100` when training is complete. - :attr str error: (optional) If an error occurred while adding a grammar file to the - custom language model, a message that describes an `Internal Server Error` and - includes the string `Cannot compile grammar`. The status of the custom model is not - affected by the error, but the grammar cannot be used with the model. - :attr str warnings: (optional) If the request included unknown parameters, the - following message: `Unexpected query parameter(s) ['parameters'] detected`, where - `parameters` is a list that includes a quoted string for each unknown parameter. + Information about an existing custom language model. 
+ + :param str customization_id: The customization ID (GUID) of the custom language + model. The [Create a custom language model](#createlanguagemodel) method returns + only this field of the object; it does not return the other fields. + :param str created: (optional) The date and time in Coordinated Universal Time + (UTC) at which the custom language model was created. The value is provided in + full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str updated: (optional) The date and time in Coordinated Universal Time + (UTC) at which the custom language model was last modified. The `created` and + `updated` fields are equal when a language model is first added but has yet to + be updated. The value is provided in full ISO 8601 format + (YYYY-MM-DDThh:mm:ss.sTZD). + :param str language: (optional) The language identifier of the custom language + model (for example, `en-US`). The value matches the five-character language + identifier from the name of the base model for the custom model. This value + might be different from the value of the `dialect` field. + :param str dialect: (optional) The dialect of the language for the custom + language model. _For custom models that are based on non-Spanish + previous-generation models and on next-generation models,_ the field matches the + language of the base model; for example, `en-US` for one of the US English + models. _For custom models that are based on Spanish previous-generation + models,_ the field indicates the dialect with which the model was created. The + value can match the name of the base model or, if it was specified by the user, + can be one of the following: + * `es-ES` for Castilian Spanish (`es-ES` models) + * `es-LA` for Latin American Spanish (`es-AR`, `es-CL`, `es-CO`, and `es-PE` + models) + * `es-US` for Mexican (North American) Spanish (`es-MX` models) + Dialect values are case-insensitive. + :param List[str] versions: (optional) A list of the available versions of the + custom language model. Each element of the array indicates a version of the base + model with which the custom model can be used. Multiple versions exist only if + the custom model has been upgraded to a new version of its base model. + Otherwise, only a single version is shown. + :param str owner: (optional) The GUID of the credentials for the instance of the + service that owns the custom language model. + :param str name: (optional) The name of the custom language model. + :param str description: (optional) The description of the custom language model. + :param str base_model_name: (optional) The name of the language model for which + the custom language model was created. + :param str status: (optional) The current status of the custom language model: + * `pending`: The model was created but is waiting either for valid training data + to be added or for the service to finish analyzing added data. + * `ready`: The model contains valid data and is ready to be trained. If the + model contains a mix of valid and invalid resources, you need to set the + `strict` parameter to `false` for the training to proceed. + * `training`: The model is currently being trained. + * `available`: The model is trained and ready to use. + * `upgrading`: The model is currently being upgraded. + * `failed`: Training of the model failed. + :param int progress: (optional) A percentage that indicates the progress of the + custom language model's current training. A value of `100` means that the model + is fully trained. 
**Note:** The `progress` field does not currently reflect the + progress of the training. The field changes from `0` to `100` when training is + complete. + :param str error: (optional) If an error occurred while adding a grammar file to + the custom language model, a message that describes an `Internal Server Error` + and includes the string `Cannot compile grammar`. The status of the custom model + is not affected by the error, but the grammar cannot be used with the model. + :param str warnings: (optional) If the request included unknown parameters, the + following message: `Unexpected query parameter(s) ['parameters'] detected`, + where `parameters` is a list that includes a quoted string for each unknown + parameter. """ - def __init__(self, - customization_id, - created=None, - language=None, - dialect=None, - versions=None, - owner=None, - name=None, - description=None, - base_model_name=None, - status=None, - progress=None, - error=None, - warnings=None): + def __init__( + self, + customization_id: str, + *, + created: Optional[str] = None, + updated: Optional[str] = None, + language: Optional[str] = None, + dialect: Optional[str] = None, + versions: Optional[List[str]] = None, + owner: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + base_model_name: Optional[str] = None, + status: Optional[str] = None, + progress: Optional[int] = None, + error: Optional[str] = None, + warnings: Optional[str] = None, + ) -> None: """ Initialize a LanguageModel object. - :param str customization_id: The customization ID (GUID) of the custom language - model. The **Create a custom language model** method returns only this field of - the object; it does not return the other fields. - :param str created: (optional) The date and time in Coordinated Universal Time - (UTC) at which the custom language model was created. The value is provided in - full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). - :param str language: (optional) The language identifier of the custom language - model (for example, `en-US`). - :param str dialect: (optional) The dialect of the language for the custom language - model. By default, the dialect matches the language of the base model; for - example, `en-US` for either of the US English language models. For Spanish models, - the field indicates the dialect for which the model was created: - * `es-ES` for Castilian Spanish (the default) - * `es-LA` for Latin American Spanish - * `es-US` for North American (Mexican) Spanish. - :param list[str] versions: (optional) A list of the available versions of the - custom language model. Each element of the array indicates a version of the base - model with which the custom model can be used. Multiple versions exist only if the - custom model has been upgraded; otherwise, only a single version is shown. - :param str owner: (optional) The GUID of the credentials for the instance of the - service that owns the custom language model. + :param str customization_id: The customization ID (GUID) of the custom + language model. The [Create a custom language model](#createlanguagemodel) + method returns only this field of the object; it does not return the other + fields. + :param str created: (optional) The date and time in Coordinated Universal + Time (UTC) at which the custom language model was created. The value is + provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). 
+ :param str updated: (optional) The date and time in Coordinated Universal + Time (UTC) at which the custom language model was last modified. The + `created` and `updated` fields are equal when a language model is first + added but has yet to be updated. The value is provided in full ISO 8601 + format (YYYY-MM-DDThh:mm:ss.sTZD). + :param str language: (optional) The language identifier of the custom + language model (for example, `en-US`). The value matches the five-character + language identifier from the name of the base model for the custom model. + This value might be different from the value of the `dialect` field. + :param str dialect: (optional) The dialect of the language for the custom + language model. _For custom models that are based on non-Spanish + previous-generation models and on next-generation models,_ the field + matches the language of the base model; for example, `en-US` for one of the + US English models. _For custom models that are based on Spanish + previous-generation models,_ the field indicates the dialect with which the + model was created. The value can match the name of the base model or, if it + was specified by the user, can be one of the following: + * `es-ES` for Castilian Spanish (`es-ES` models) + * `es-LA` for Latin American Spanish (`es-AR`, `es-CL`, `es-CO`, and + `es-PE` models) + * `es-US` for Mexican (North American) Spanish (`es-MX` models) + Dialect values are case-insensitive. + :param List[str] versions: (optional) A list of the available versions of + the custom language model. Each element of the array indicates a version of + the base model with which the custom model can be used. Multiple versions + exist only if the custom model has been upgraded to a new version of its + base model. Otherwise, only a single version is shown. + :param str owner: (optional) The GUID of the credentials for the instance + of the service that owns the custom language model. :param str name: (optional) The name of the custom language model. - :param str description: (optional) The description of the custom language model. - :param str base_model_name: (optional) The name of the language model for which - the custom language model was created. - :param str status: (optional) The current status of the custom language model: - * `pending`: The model was created but is waiting either for training data to be - added or for the service to finish analyzing added data. - * `ready`: The model contains data and is ready to be trained. - * `training`: The model is currently being trained. - * `available`: The model is trained and ready to use. - * `upgrading`: The model is currently being upgraded. - * `failed`: Training of the model failed. - :param int progress: (optional) A percentage that indicates the progress of the - custom language model's current training. A value of `100` means that the model is - fully trained. **Note:** The `progress` field does not currently reflect the - progress of the training. The field changes from `0` to `100` when training is - complete. - :param str error: (optional) If an error occurred while adding a grammar file to - the custom language model, a message that describes an `Internal Server Error` and - includes the string `Cannot compile grammar`. The status of the custom model is - not affected by the error, but the grammar cannot be used with the model. 
- :param str warnings: (optional) If the request included unknown parameters, the - following message: `Unexpected query parameter(s) ['parameters'] detected`, where - `parameters` is a list that includes a quoted string for each unknown parameter. + :param str description: (optional) The description of the custom language + model. + :param str base_model_name: (optional) The name of the language model for + which the custom language model was created. + :param str status: (optional) The current status of the custom language + model: + * `pending`: The model was created but is waiting either for valid training + data to be added or for the service to finish analyzing added data. + * `ready`: The model contains valid data and is ready to be trained. If the + model contains a mix of valid and invalid resources, you need to set the + `strict` parameter to `false` for the training to proceed. + * `training`: The model is currently being trained. + * `available`: The model is trained and ready to use. + * `upgrading`: The model is currently being upgraded. + * `failed`: Training of the model failed. + :param int progress: (optional) A percentage that indicates the progress of + the custom language model's current training. A value of `100` means that + the model is fully trained. **Note:** The `progress` field does not + currently reflect the progress of the training. The field changes from `0` + to `100` when training is complete. + :param str error: (optional) If an error occurred while adding a grammar + file to the custom language model, a message that describes an `Internal + Server Error` and includes the string `Cannot compile grammar`. The status + of the custom model is not affected by the error, but the grammar cannot be + used with the model. + :param str warnings: (optional) If the request included unknown parameters, + the following message: `Unexpected query parameter(s) ['parameters'] + detected`, where `parameters` is a list that includes a quoted string for + each unknown parameter. 
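For illustration only (an editor's sketch, not part of this diff): the fields above are typically read by fetching a custom model and rebuilding it with the `from_dict` helper introduced by this change. The API key, service URL, and customization ID below are placeholders.

from ibm_watson import SpeechToTextV1
from ibm_watson.speech_to_text_v1 import LanguageModel
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Placeholder credentials and customization ID -- substitute real values.
speech_to_text = SpeechToTextV1(authenticator=IAMAuthenticator('{apikey}'))
speech_to_text.set_service_url('{url}')

result = speech_to_text.get_language_model(
    customization_id='{customization_id}').get_result()
model = LanguageModel.from_dict(result)

# The status values documented above map to the StatusEnum added to this class.
if model.status == LanguageModel.StatusEnum.AVAILABLE:
    print(f'{model.name} is trained and ready to use')
else:
    print(f'{model.name} is {model.status} ({model.progress}% trained)')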
""" self.customization_id = customization_id self.created = created + self.updated = updated self.language = language self.dialect = dialect self.versions = versions @@ -4080,42 +7654,49 @@ def __init__(self, self.warnings = warnings @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'LanguageModel': """Initialize a LanguageModel object from a json dictionary.""" args = {} - if 'customization_id' in _dict: - args['customization_id'] = _dict.get('customization_id') + if (customization_id := _dict.get('customization_id')) is not None: + args['customization_id'] = customization_id else: raise ValueError( 'Required property \'customization_id\' not present in LanguageModel JSON' ) - if 'created' in _dict: - args['created'] = _dict.get('created') - if 'language' in _dict: - args['language'] = _dict.get('language') - if 'dialect' in _dict: - args['dialect'] = _dict.get('dialect') - if 'versions' in _dict: - args['versions'] = _dict.get('versions') - if 'owner' in _dict: - args['owner'] = _dict.get('owner') - if 'name' in _dict: - args['name'] = _dict.get('name') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'base_model_name' in _dict: - args['base_model_name'] = _dict.get('base_model_name') - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'progress' in _dict: - args['progress'] = _dict.get('progress') - if 'error' in _dict: - args['error'] = _dict.get('error') - if 'warnings' in _dict: - args['warnings'] = _dict.get('warnings') + if (created := _dict.get('created')) is not None: + args['created'] = created + if (updated := _dict.get('updated')) is not None: + args['updated'] = updated + if (language := _dict.get('language')) is not None: + args['language'] = language + if (dialect := _dict.get('dialect')) is not None: + args['dialect'] = dialect + if (versions := _dict.get('versions')) is not None: + args['versions'] = versions + if (owner := _dict.get('owner')) is not None: + args['owner'] = owner + if (name := _dict.get('name')) is not None: + args['name'] = name + if (description := _dict.get('description')) is not None: + args['description'] = description + if (base_model_name := _dict.get('base_model_name')) is not None: + args['base_model_name'] = base_model_name + if (status := _dict.get('status')) is not None: + args['status'] = status + if (progress := _dict.get('progress')) is not None: + args['progress'] = progress + if (error := _dict.get('error')) is not None: + args['error'] = error + if (warnings := _dict.get('warnings')) is not None: + args['warnings'] = warnings return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a LanguageModel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, @@ -4123,6 +7704,8 @@ def _to_dict(self): _dict['customization_id'] = self.customization_id if hasattr(self, 'created') and self.created is not None: _dict['created'] = self.created + if hasattr(self, 'updated') and self.updated is not None: + _dict['updated'] = self.updated if hasattr(self, 'language') and self.language is not None: _dict['language'] = self.language if hasattr(self, 'dialect') and self.dialect is not None: @@ -4148,50 +7731,79 @@ def _to_dict(self): _dict['warnings'] = self.warnings return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def 
__str__(self) -> str: """Return a `str` version of this LanguageModel object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'LanguageModel') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'LanguageModel') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class StatusEnum(str, Enum): + """ + The current status of the custom language model: + * `pending`: The model was created but is waiting either for valid training data + to be added or for the service to finish analyzing added data. + * `ready`: The model contains valid data and is ready to be trained. If the model + contains a mix of valid and invalid resources, you need to set the `strict` + parameter to `false` for the training to proceed. + * `training`: The model is currently being trained. + * `available`: The model is trained and ready to use. + * `upgrading`: The model is currently being upgraded. + * `failed`: Training of the model failed. + """ + + PENDING = 'pending' + READY = 'ready' + TRAINING = 'training' + AVAILABLE = 'available' + UPGRADING = 'upgrading' + FAILED = 'failed' -class LanguageModels(object): + +class LanguageModels: """ - LanguageModels. + Information about existing custom language models. - :attr list[LanguageModel] customizations: An array of `LanguageModel` objects that - provides information about each available custom language model. The array is empty if - the requesting credentials own no custom language models (if no language is specified) - or own no custom language models for the specified language. + :param List[LanguageModel] customizations: An array of `LanguageModel` objects + that provides information about each available custom language model. The array + is empty if the requesting credentials own no custom language models (if no + language is specified) or own no custom language models for the specified + language. """ - def __init__(self, customizations): + def __init__( + self, + customizations: List['LanguageModel'], + ) -> None: """ Initialize a LanguageModels object. - :param list[LanguageModel] customizations: An array of `LanguageModel` objects - that provides information about each available custom language model. The array is - empty if the requesting credentials own no custom language models (if no language - is specified) or own no custom language models for the specified language. + :param List[LanguageModel] customizations: An array of `LanguageModel` + objects that provides information about each available custom language + model. The array is empty if the requesting credentials own no custom + language models (if no language is specified) or own no custom language + models for the specified language. 
""" self.customizations = customizations @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'LanguageModels': """Initialize a LanguageModels object from a json dictionary.""" args = {} - if 'customizations' in _dict: + if (customizations := _dict.get('customizations')) is not None: args['customizations'] = [ - LanguageModel._from_dict(x) - for x in (_dict.get('customizations')) + LanguageModel.from_dict(v) for v in customizations ] else: raise ValueError( @@ -4199,116 +7811,398 @@ def _from_dict(cls, _dict): ) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a LanguageModels object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'customizations') and self.customizations is not None: - _dict['customizations'] = [ - x._to_dict() for x in self.customizations - ] + customizations_list = [] + for v in self.customizations: + if isinstance(v, dict): + customizations_list.append(v) + else: + customizations_list.append(v.to_dict()) + _dict['customizations'] = customizations_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this LanguageModels object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'LanguageModels') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'LanguageModels') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProcessedAudio: + """ + Detailed timing information about the service's processing of the input audio. + + :param float received: The seconds of audio that the service has received as of + this response. The value of the field is greater than the values of the + `transcription` and `speaker_labels` fields during speech recognition + processing, since the service first has to receive the audio before it can begin + to process it. The final value can also be greater than the value of the + `transcription` and `speaker_labels` fields by a fractional number of seconds. + :param float seen_by_engine: The seconds of audio that the service has passed to + its speech-processing engine as of this response. The value of the field is + greater than the values of the `transcription` and `speaker_labels` fields + during speech recognition processing. The `received` and `seen_by_engine` fields + have identical values when the service has finished processing all audio. This + final value can be greater than the value of the `transcription` and + `speaker_labels` fields by a fractional number of seconds. + :param float transcription: The seconds of audio that the service has processed + for speech recognition as of this response. + :param float speaker_labels: (optional) If speaker labels are requested, the + seconds of audio that the service has processed to determine speaker labels as + of this response. This value often trails the value of the `transcription` field + during speech recognition processing. The `transcription` and `speaker_labels` + fields have identical values when the service has finished processing all audio. 
+ """ + + def __init__( + self, + received: float, + seen_by_engine: float, + transcription: float, + *, + speaker_labels: Optional[float] = None, + ) -> None: + """ + Initialize a ProcessedAudio object. + + :param float received: The seconds of audio that the service has received + as of this response. The value of the field is greater than the values of + the `transcription` and `speaker_labels` fields during speech recognition + processing, since the service first has to receive the audio before it can + begin to process it. The final value can also be greater than the value of + the `transcription` and `speaker_labels` fields by a fractional number of + seconds. + :param float seen_by_engine: The seconds of audio that the service has + passed to its speech-processing engine as of this response. The value of + the field is greater than the values of the `transcription` and + `speaker_labels` fields during speech recognition processing. The + `received` and `seen_by_engine` fields have identical values when the + service has finished processing all audio. This final value can be greater + than the value of the `transcription` and `speaker_labels` fields by a + fractional number of seconds. + :param float transcription: The seconds of audio that the service has + processed for speech recognition as of this response. + :param float speaker_labels: (optional) If speaker labels are requested, + the seconds of audio that the service has processed to determine speaker + labels as of this response. This value often trails the value of the + `transcription` field during speech recognition processing. The + `transcription` and `speaker_labels` fields have identical values when the + service has finished processing all audio. + """ + self.received = received + self.seen_by_engine = seen_by_engine + self.transcription = transcription + self.speaker_labels = speaker_labels + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProcessedAudio': + """Initialize a ProcessedAudio object from a json dictionary.""" + args = {} + if (received := _dict.get('received')) is not None: + args['received'] = received + else: + raise ValueError( + 'Required property \'received\' not present in ProcessedAudio JSON' + ) + if (seen_by_engine := _dict.get('seen_by_engine')) is not None: + args['seen_by_engine'] = seen_by_engine + else: + raise ValueError( + 'Required property \'seen_by_engine\' not present in ProcessedAudio JSON' + ) + if (transcription := _dict.get('transcription')) is not None: + args['transcription'] = transcription + else: + raise ValueError( + 'Required property \'transcription\' not present in ProcessedAudio JSON' + ) + if (speaker_labels := _dict.get('speaker_labels')) is not None: + args['speaker_labels'] = speaker_labels + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProcessedAudio object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'received') and self.received is not None: + _dict['received'] = self.received + if hasattr(self, 'seen_by_engine') and self.seen_by_engine is not None: + _dict['seen_by_engine'] = self.seen_by_engine + if hasattr(self, 'transcription') and self.transcription is not None: + _dict['transcription'] = self.transcription + if hasattr(self, 'speaker_labels') and self.speaker_labels is not None: + _dict['speaker_labels'] = self.speaker_labels + return _dict + + def _to_dict(self): + """Return a json 
dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProcessedAudio object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'ProcessedAudio') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'ProcessedAudio') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class ProcessingMetrics: + """ + If processing metrics are requested, information about the service's processing of the + input audio. Processing metrics are not available with the synchronous [Recognize + audio](#recognize) method. + + :param ProcessedAudio processed_audio: Detailed timing information about the + service's processing of the input audio. + :param float wall_clock_since_first_byte_received: The amount of real time in + seconds that has passed since the service received the first byte of input + audio. Values in this field are generally multiples of the specified metrics + interval, with two differences: + * Values might not reflect exact intervals (for instance, 0.25, 0.5, and so on). + Actual values might be 0.27, 0.52, and so on, depending on when the service + receives and processes audio. + * The service also returns values for transcription events if you set the + `interim_results` parameter to `true`. The service returns both processing + metrics and transcription results when such events occur. + :param bool periodic: An indication of whether the metrics apply to a periodic + interval or a transcription event: + * `true` means that the response was triggered by a specified processing + interval. The information contains processing metrics only. + * `false` means that the response was triggered by a transcription event. The + information contains processing metrics plus transcription results. + Use the field to identify why the service generated the response and to filter + different results if necessary. + """ + + def __init__( + self, + processed_audio: 'ProcessedAudio', + wall_clock_since_first_byte_received: float, + periodic: bool, + ) -> None: + """ + Initialize a ProcessingMetrics object. + + :param ProcessedAudio processed_audio: Detailed timing information about + the service's processing of the input audio. + :param float wall_clock_since_first_byte_received: The amount of real time + in seconds that has passed since the service received the first byte of + input audio. Values in this field are generally multiples of the specified + metrics interval, with two differences: + * Values might not reflect exact intervals (for instance, 0.25, 0.5, and so + on). Actual values might be 0.27, 0.52, and so on, depending on when the + service receives and processes audio. + * The service also returns values for transcription events if you set the + `interim_results` parameter to `true`. The service returns both processing + metrics and transcription results when such events occur. + :param bool periodic: An indication of whether the metrics apply to a + periodic interval or a transcription event: + * `true` means that the response was triggered by a specified processing + interval. The information contains processing metrics only. + * `false` means that the response was triggered by a transcription event. + The information contains processing metrics plus transcription results. 
+ Use the field to identify why the service generated the response and to + filter different results if necessary. + """ + self.processed_audio = processed_audio + self.wall_clock_since_first_byte_received = wall_clock_since_first_byte_received + self.periodic = periodic + + @classmethod + def from_dict(cls, _dict: Dict) -> 'ProcessingMetrics': + """Initialize a ProcessingMetrics object from a json dictionary.""" + args = {} + if (processed_audio := _dict.get('processed_audio')) is not None: + args['processed_audio'] = ProcessedAudio.from_dict(processed_audio) + else: + raise ValueError( + 'Required property \'processed_audio\' not present in ProcessingMetrics JSON' + ) + if (wall_clock_since_first_byte_received := + _dict.get('wall_clock_since_first_byte_received')) is not None: + args[ + 'wall_clock_since_first_byte_received'] = wall_clock_since_first_byte_received + else: + raise ValueError( + 'Required property \'wall_clock_since_first_byte_received\' not present in ProcessingMetrics JSON' + ) + if (periodic := _dict.get('periodic')) is not None: + args['periodic'] = periodic + else: + raise ValueError( + 'Required property \'periodic\' not present in ProcessingMetrics JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a ProcessingMetrics object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'processed_audio') and self.processed_audio is not None: + if isinstance(self.processed_audio, dict): + _dict['processed_audio'] = self.processed_audio + else: + _dict['processed_audio'] = self.processed_audio.to_dict() + if hasattr(self, 'wall_clock_since_first_byte_received' + ) and self.wall_clock_since_first_byte_received is not None: + _dict[ + 'wall_clock_since_first_byte_received'] = self.wall_clock_since_first_byte_received + if hasattr(self, 'periodic') and self.periodic is not None: + _dict['periodic'] = self.periodic + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this ProcessingMetrics object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'ProcessingMetrics') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'ProcessingMetrics') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class RecognitionJob(object): +class RecognitionJob: """ - RecognitionJob. - - :attr str id: The ID of the asynchronous job. - :attr str status: The current status of the job: - * `waiting`: The service is preparing the job for processing. The service returns this - status when the job is initially created or when it is waiting for capacity to process - the job. The job remains in this state until the service has the capacity to begin - processing it. - * `processing`: The service is actively processing the job. - * `completed`: The service has finished processing the job. If the job specified a - callback URL and the event `recognitions.completed_with_results`, the service sent the - results with the callback notification. Otherwise, you must retrieve the results by - checking the individual job. - * `failed`: The job failed. 
- :attr str created: The date and time in Coordinated Universal Time (UTC) at which the - job was created. The value is provided in full ISO 8601 format - (`YYYY-MM-DDThh:mm:ss.sTZD`). - :attr str updated: (optional) The date and time in Coordinated Universal Time (UTC) at - which the job was last updated by the service. The value is provided in full ISO 8601 - format (`YYYY-MM-DDThh:mm:ss.sTZD`). This field is returned only by the **Check jobs** - and **Check a job** methods. - :attr str url: (optional) The URL to use to request information about the job with the - **Check a job** method. This field is returned only by the **Create a job** method. - :attr str user_token: (optional) The user token associated with a job that was created - with a callback URL and a user token. This field can be returned only by the **Check - jobs** method. - :attr list[SpeechRecognitionResults] results: (optional) If the status is `completed`, - the results of the recognition request as an array that includes a single instance of - a `SpeechRecognitionResults` object. This field is returned only by the **Check a - job** method. - :attr list[str] warnings: (optional) An array of warning messages about invalid - parameters included with the request. Each warning includes a descriptive message and - a list of invalid argument strings, for example, `"unexpected query parameter - 'user_token', query parameter 'callback_url' was not specified"`. The request succeeds - despite the warnings. This field can be returned only by the **Create a job** method. + Information about a current asynchronous speech recognition job. + + :param str id: The ID of the asynchronous job. + :param str status: The current status of the job: + * `waiting`: The service is preparing the job for processing. The service + returns this status when the job is initially created or when it is waiting for + capacity to process the job. The job remains in this state until the service has + the capacity to begin processing it. + * `processing`: The service is actively processing the job. + * `completed`: The service has finished processing the job. If the job specified + a callback URL and the event `recognitions.completed_with_results`, the service + sent the results with the callback notification. Otherwise, you must retrieve + the results by checking the individual job. + * `failed`: The job failed. + :param str created: The date and time in Coordinated Universal Time (UTC) at + which the job was created. The value is provided in full ISO 8601 format + (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str updated: (optional) The date and time in Coordinated Universal Time + (UTC) at which the job was last updated by the service. The value is provided in + full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). This field is returned only + by the [Check jobs](#checkjobs) and [Check a job](#checkjob) methods. + :param str url: (optional) The URL to use to request information about the job + with the [Check a job](#checkjob) method. This field is returned only by the + [Create a job](#createjob) method. + :param str user_token: (optional) The user token associated with a job that was + created with a callback URL and a user token. This field can be returned only by + the [Check jobs](#checkjobs) method. + :param List[SpeechRecognitionResults] results: (optional) If the status is + `completed`, the results of the recognition request as an array that includes a + single instance of a `SpeechRecognitionResults` object. 
This field is returned + only by the [Check a job](#checkjob) method. + :param List[str] warnings: (optional) An array of warning messages about invalid + parameters included with the request. Each warning includes a descriptive + message and a list of invalid argument strings, for example, `"unexpected query + parameter 'user_token', query parameter 'callback_url' was not specified"`. The + request succeeds despite the warnings. This field can be returned only by the + [Create a job](#createjob) method. (If you use the `character_insertion_bias` + parameter with a previous-generation model, the warning message refers to the + parameter as `lambdaBias`.). """ - def __init__(self, - id, - status, - created, - updated=None, - url=None, - user_token=None, - results=None, - warnings=None): + def __init__( + self, + id: str, + status: str, + created: str, + *, + updated: Optional[str] = None, + url: Optional[str] = None, + user_token: Optional[str] = None, + results: Optional[List['SpeechRecognitionResults']] = None, + warnings: Optional[List[str]] = None, + ) -> None: """ Initialize a RecognitionJob object. :param str id: The ID of the asynchronous job. :param str status: The current status of the job: - * `waiting`: The service is preparing the job for processing. The service returns - this status when the job is initially created or when it is waiting for capacity - to process the job. The job remains in this state until the service has the - capacity to begin processing it. - * `processing`: The service is actively processing the job. - * `completed`: The service has finished processing the job. If the job specified a - callback URL and the event `recognitions.completed_with_results`, the service sent - the results with the callback notification. Otherwise, you must retrieve the - results by checking the individual job. - * `failed`: The job failed. - :param str created: The date and time in Coordinated Universal Time (UTC) at which - the job was created. The value is provided in full ISO 8601 format - (`YYYY-MM-DDThh:mm:ss.sTZD`). - :param str updated: (optional) The date and time in Coordinated Universal Time - (UTC) at which the job was last updated by the service. The value is provided in - full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). This field is returned only by - the **Check jobs** and **Check a job** methods. - :param str url: (optional) The URL to use to request information about the job - with the **Check a job** method. This field is returned only by the **Create a - job** method. - :param str user_token: (optional) The user token associated with a job that was - created with a callback URL and a user token. This field can be returned only by - the **Check jobs** method. - :param list[SpeechRecognitionResults] results: (optional) If the status is - `completed`, the results of the recognition request as an array that includes a - single instance of a `SpeechRecognitionResults` object. This field is returned - only by the **Check a job** method. - :param list[str] warnings: (optional) An array of warning messages about invalid - parameters included with the request. Each warning includes a descriptive message - and a list of invalid argument strings, for example, `"unexpected query parameter - 'user_token', query parameter 'callback_url' was not specified"`. The request - succeeds despite the warnings. This field can be returned only by the **Create a - job** method. + * `waiting`: The service is preparing the job for processing. 
The service + returns this status when the job is initially created or when it is waiting + for capacity to process the job. The job remains in this state until the + service has the capacity to begin processing it. + * `processing`: The service is actively processing the job. + * `completed`: The service has finished processing the job. If the job + specified a callback URL and the event + `recognitions.completed_with_results`, the service sent the results with + the callback notification. Otherwise, you must retrieve the results by + checking the individual job. + * `failed`: The job failed. + :param str created: The date and time in Coordinated Universal Time (UTC) + at which the job was created. The value is provided in full ISO 8601 format + (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str updated: (optional) The date and time in Coordinated Universal + Time (UTC) at which the job was last updated by the service. The value is + provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). This field + is returned only by the [Check jobs](#checkjobs) and [Check a + job](#checkjob) methods. + :param str url: (optional) The URL to use to request information about the + job with the [Check a job](#checkjob) method. This field is returned only + by the [Create a job](#createjob) method. + :param str user_token: (optional) The user token associated with a job that + was created with a callback URL and a user token. This field can be + returned only by the [Check jobs](#checkjobs) method. + :param List[SpeechRecognitionResults] results: (optional) If the status is + `completed`, the results of the recognition request as an array that + includes a single instance of a `SpeechRecognitionResults` object. This + field is returned only by the [Check a job](#checkjob) method. + :param List[str] warnings: (optional) An array of warning messages about + invalid parameters included with the request. Each warning includes a + descriptive message and a list of invalid argument strings, for example, + `"unexpected query parameter 'user_token', query parameter 'callback_url' + was not specified"`. The request succeeds despite the warnings. This field + can be returned only by the [Create a job](#createjob) method. (If you use + the `character_insertion_bias` parameter with a previous-generation model, + the warning message refers to the parameter as `lambdaBias`.). 
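A hedged sketch of the asynchronous flow these fields describe (editor's illustration, not part of the diff; the credentials and audio file name are placeholders):

import time

from ibm_watson import SpeechToTextV1
from ibm_watson.speech_to_text_v1 import RecognitionJob
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Placeholder credentials and audio file -- substitute real values.
speech_to_text = SpeechToTextV1(authenticator=IAMAuthenticator('{apikey}'))
speech_to_text.set_service_url('{url}')

with open('audio-file.flac', 'rb') as audio_file:
    job = RecognitionJob.from_dict(
        speech_to_text.create_job(audio=audio_file,
                                  content_type='audio/flac').get_result())

# Poll until the job leaves the 'waiting'/'processing' states described above.
while job.status not in ('completed', 'failed'):
    time.sleep(5)
    job = RecognitionJob.from_dict(
        speech_to_text.check_job(id=job.id).get_result())

if job.status == 'completed' and job.results:
    print(job.results[0])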
""" self.id = id self.status = status @@ -4320,42 +8214,46 @@ def __init__(self, self.warnings = warnings @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'RecognitionJob': """Initialize a RecognitionJob object from a json dictionary.""" args = {} - if 'id' in _dict: - args['id'] = _dict.get('id') + if (id := _dict.get('id')) is not None: + args['id'] = id else: raise ValueError( 'Required property \'id\' not present in RecognitionJob JSON') - if 'status' in _dict: - args['status'] = _dict.get('status') + if (status := _dict.get('status')) is not None: + args['status'] = status else: raise ValueError( 'Required property \'status\' not present in RecognitionJob JSON' ) - if 'created' in _dict: - args['created'] = _dict.get('created') + if (created := _dict.get('created')) is not None: + args['created'] = created else: raise ValueError( 'Required property \'created\' not present in RecognitionJob JSON' ) - if 'updated' in _dict: - args['updated'] = _dict.get('updated') - if 'url' in _dict: - args['url'] = _dict.get('url') - if 'user_token' in _dict: - args['user_token'] = _dict.get('user_token') - if 'results' in _dict: + if (updated := _dict.get('updated')) is not None: + args['updated'] = updated + if (url := _dict.get('url')) is not None: + args['url'] = url + if (user_token := _dict.get('user_token')) is not None: + args['user_token'] = user_token + if (results := _dict.get('results')) is not None: args['results'] = [ - SpeechRecognitionResults._from_dict(x) - for x in (_dict.get('results')) + SpeechRecognitionResults.from_dict(v) for v in results ] - if 'warnings' in _dict: - args['warnings'] = _dict.get('warnings') + if (warnings := _dict.get('warnings')) is not None: + args['warnings'] = warnings return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RecognitionJob object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'id') and self.id is not None: @@ -4371,53 +8269,85 @@ def _to_dict(self): if hasattr(self, 'user_token') and self.user_token is not None: _dict['user_token'] = self.user_token if hasattr(self, 'results') and self.results is not None: - _dict['results'] = [x._to_dict() for x in self.results] + results_list = [] + for v in self.results: + if isinstance(v, dict): + results_list.append(v) + else: + results_list.append(v.to_dict()) + _dict['results'] = results_list if hasattr(self, 'warnings') and self.warnings is not None: _dict['warnings'] = self.warnings return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this RecognitionJob object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'RecognitionJob') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'RecognitionJob') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class StatusEnum(str, Enum): + """ + The current status of the job: + * `waiting`: The service is preparing the job for processing. 
The service returns + this status when the job is initially created or when it is waiting for capacity + to process the job. The job remains in this state until the service has the + capacity to begin processing it. + * `processing`: The service is actively processing the job. + * `completed`: The service has finished processing the job. If the job specified a + callback URL and the event `recognitions.completed_with_results`, the service sent + the results with the callback notification. Otherwise, you must retrieve the + results by checking the individual job. + * `failed`: The job failed. + """ + + WAITING = 'waiting' + PROCESSING = 'processing' + COMPLETED = 'completed' + FAILED = 'failed' + -class RecognitionJobs(object): +class RecognitionJobs: """ - RecognitionJobs. + Information about current asynchronous speech recognition jobs. - :attr list[RecognitionJob] recognitions: An array of `RecognitionJob` objects that - provides the status for each of the user's current jobs. The array is empty if the - user has no current jobs. + :param List[RecognitionJob] recognitions: An array of `RecognitionJob` objects + that provides the status for each of the user's current jobs. The array is empty + if the user has no current jobs. """ - def __init__(self, recognitions): + def __init__( + self, + recognitions: List['RecognitionJob'], + ) -> None: """ Initialize a RecognitionJobs object. - :param list[RecognitionJob] recognitions: An array of `RecognitionJob` objects - that provides the status for each of the user's current jobs. The array is empty - if the user has no current jobs. + :param List[RecognitionJob] recognitions: An array of `RecognitionJob` + objects that provides the status for each of the user's current jobs. The + array is empty if the user has no current jobs. 
""" self.recognitions = recognitions @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'RecognitionJobs': """Initialize a RecognitionJobs object from a json dictionary.""" args = {} - if 'recognitions' in _dict: + if (recognitions := _dict.get('recognitions')) is not None: args['recognitions'] = [ - RecognitionJob._from_dict(x) - for x in (_dict.get('recognitions')) + RecognitionJob.from_dict(v) for v in recognitions ] else: raise ValueError( @@ -4425,70 +8355,95 @@ def _from_dict(cls, _dict): ) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RecognitionJobs object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'recognitions') and self.recognitions is not None: - _dict['recognitions'] = [x._to_dict() for x in self.recognitions] + recognitions_list = [] + for v in self.recognitions: + if isinstance(v, dict): + recognitions_list.append(v) + else: + recognitions_list.append(v.to_dict()) + _dict['recognitions'] = recognitions_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this RecognitionJobs object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'RecognitionJobs') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'RecognitionJobs') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class RegisterStatus(object): +class RegisterStatus: """ - RegisterStatus. - - :attr str status: The current status of the job: - * `created`: The service successfully white-listed the callback URL as a result of the - call. - * `already created`: The URL was already white-listed. - :attr str url: The callback URL that is successfully registered. + Information about a request to register a callback for asynchronous speech + recognition. + + :param str status: The current status of the job: + * `created`: The service successfully allowlisted the callback URL as a result + of the call. + * `already created`: The URL was already allowlisted. + :param str url: The callback URL that is successfully registered. """ - def __init__(self, status, url): + def __init__( + self, + status: str, + url: str, + ) -> None: """ Initialize a RegisterStatus object. :param str status: The current status of the job: - * `created`: The service successfully white-listed the callback URL as a result of - the call. - * `already created`: The URL was already white-listed. + * `created`: The service successfully allowlisted the callback URL as a + result of the call. + * `already created`: The URL was already allowlisted. :param str url: The callback URL that is successfully registered. 
""" self.status = status self.url = url @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'RegisterStatus': """Initialize a RegisterStatus object from a json dictionary.""" args = {} - if 'status' in _dict: - args['status'] = _dict.get('status') + if (status := _dict.get('status')) is not None: + args['status'] = status else: raise ValueError( 'Required property \'status\' not present in RegisterStatus JSON' ) - if 'url' in _dict: - args['url'] = _dict.get('url') + if (url := _dict.get('url')) is not None: + args['url'] = url else: raise ValueError( 'Required property \'url\' not present in RegisterStatus JSON') return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a RegisterStatus object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'status') and self.status is not None: @@ -4497,106 +8452,134 @@ def _to_dict(self): _dict['url'] = self.url return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this RegisterStatus object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'RegisterStatus') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'RegisterStatus') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class StatusEnum(str, Enum): + """ + The current status of the job: + * `created`: The service successfully allowlisted the callback URL as a result of + the call. + * `already created`: The URL was already allowlisted. + """ + + CREATED = 'created' + ALREADY_CREATED = 'already created' -class SpeakerLabelsResult(object): + +class SpeakerLabelsResult: """ - SpeakerLabelsResult. - - :attr float from_: The start time of a word from the transcript. The value matches the - start time of a word from the `timestamps` array. - :attr float to: The end time of a word from the transcript. The value matches the end - time of a word from the `timestamps` array. - :attr int speaker: The numeric identifier that the service assigns to a speaker from - the audio. Speaker IDs begin at `0` initially but can evolve and change across interim - results (if supported by the method) and between interim and final results as the - service processes the audio. They are not guaranteed to be sequential, contiguous, or - ordered. - :attr float confidence: A score that indicates the service's confidence in its - identification of the speaker in the range of 0.0 to 1.0. - :attr bool final_results: An indication of whether the service might further change - word and speaker-label results. A value of `true` means that the service guarantees - not to send any further updates for the current or any preceding results; `false` - means that the service might send further updates to the results. + Information about the speakers from speech recognition results. + + :param float from_: The start time of a word from the transcript. The value + matches the start time of a word from the `timestamps` array. + :param float to: The end time of a word from the transcript. 
The value matches + the end time of a word from the `timestamps` array. + :param int speaker: The numeric identifier that the service assigns to a speaker + from the audio. Speaker IDs begin at `0` initially but can evolve and change + across interim results (if supported by the method) and between interim and + final results as the service processes the audio. They are not guaranteed to be + sequential, contiguous, or ordered. + :param float confidence: A score that indicates the service's confidence in its + identification of the speaker in the range of 0.0 to 1.0. + :param bool final: An indication of whether the service might further change + word and speaker-label results. A value of `true` means that the service + guarantees not to send any further updates for the current or any preceding + results; `false` means that the service might send further updates to the + results. """ - def __init__(self, from_, to, speaker, confidence, final_results): + def __init__( + self, + from_: float, + to: float, + speaker: int, + confidence: float, + final: bool, + ) -> None: """ Initialize a SpeakerLabelsResult object. :param float from_: The start time of a word from the transcript. The value - matches the start time of a word from the `timestamps` array. - :param float to: The end time of a word from the transcript. The value matches the - end time of a word from the `timestamps` array. - :param int speaker: The numeric identifier that the service assigns to a speaker - from the audio. Speaker IDs begin at `0` initially but can evolve and change - across interim results (if supported by the method) and between interim and final - results as the service processes the audio. They are not guaranteed to be - sequential, contiguous, or ordered. - :param float confidence: A score that indicates the service's confidence in its - identification of the speaker in the range of 0.0 to 1.0. - :param bool final_results: An indication of whether the service might further - change word and speaker-label results. A value of `true` means that the service - guarantees not to send any further updates for the current or any preceding - results; `false` means that the service might send further updates to the results. + matches the start time of a word from the `timestamps` array. + :param float to: The end time of a word from the transcript. The value + matches the end time of a word from the `timestamps` array. + :param int speaker: The numeric identifier that the service assigns to a + speaker from the audio. Speaker IDs begin at `0` initially but can evolve + and change across interim results (if supported by the method) and between + interim and final results as the service processes the audio. They are not + guaranteed to be sequential, contiguous, or ordered. + :param float confidence: A score that indicates the service's confidence in + its identification of the speaker in the range of 0.0 to 1.0. + :param bool final: An indication of whether the service might further + change word and speaker-label results. A value of `true` means that the + service guarantees not to send any further updates for the current or any + preceding results; `false` means that the service might send further + updates to the results. 
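A sketch of how these speaker-label fields are typically consumed (editor's illustration; credentials and the audio file are placeholders, and the chosen model must support speaker labels):

from ibm_watson import SpeechToTextV1
from ibm_watson.speech_to_text_v1 import SpeakerLabelsResult
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Placeholder credentials and audio file -- substitute real values.
speech_to_text = SpeechToTextV1(authenticator=IAMAuthenticator('{apikey}'))
speech_to_text.set_service_url('{url}')

with open('audio-file.flac', 'rb') as audio_file:
    result = speech_to_text.recognize(audio=audio_file,
                                      content_type='audio/flac',
                                      speaker_labels=True).get_result()

for entry in result.get('speaker_labels', []):
    label = SpeakerLabelsResult.from_dict(entry)
    print(f'speaker {label.speaker}: {label.from_:.2f}s-{label.to:.2f}s '
          f'(confidence {label.confidence:.2f}, final={label.final})')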
""" self.from_ = from_ self.to = to self.speaker = speaker self.confidence = confidence - self.final_results = final_results + self.final = final @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SpeakerLabelsResult': """Initialize a SpeakerLabelsResult object from a json dictionary.""" args = {} - if 'from' in _dict: - args['from_'] = _dict.get('from') + if (from_ := _dict.get('from')) is not None: + args['from_'] = from_ else: raise ValueError( 'Required property \'from\' not present in SpeakerLabelsResult JSON' ) - if 'to' in _dict: - args['to'] = _dict.get('to') + if (to := _dict.get('to')) is not None: + args['to'] = to else: raise ValueError( 'Required property \'to\' not present in SpeakerLabelsResult JSON' ) - if 'speaker' in _dict: - args['speaker'] = _dict.get('speaker') + if (speaker := _dict.get('speaker')) is not None: + args['speaker'] = speaker else: raise ValueError( 'Required property \'speaker\' not present in SpeakerLabelsResult JSON' ) - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence else: raise ValueError( 'Required property \'confidence\' not present in SpeakerLabelsResult JSON' ) - if 'final' in _dict or 'final_results' in _dict: - args['final_results'] = _dict.get('final') or _dict.get( - 'final_results') + if (final := _dict.get('final')) is not None: + args['final'] = final else: raise ValueError( 'Required property \'final\' not present in SpeakerLabelsResult JSON' ) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeakerLabelsResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'from_') and self.from_ is not None: @@ -4607,53 +8590,66 @@ def _to_dict(self): _dict['speaker'] = self.speaker if hasattr(self, 'confidence') and self.confidence is not None: _dict['confidence'] = self.confidence - if hasattr(self, 'final_results') and self.final_results is not None: - _dict['final'] = self.final_results + if hasattr(self, 'final') and self.final is not None: + _dict['final'] = self.final return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SpeakerLabelsResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SpeakerLabelsResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SpeakerLabelsResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SpeechModel(object): +class SpeechModel: """ - SpeechModel. - - :attr str name: The name of the model for use as an identifier in calls to the service - (for example, `en-US_BroadbandModel`). - :attr str language: The language identifier of the model (for example, `en-US`). - :attr int rate: The sampling rate (minimum acceptable rate for audio) used by the - model in Hertz. - :attr str url: The URI for the model. 
- :attr SupportedFeatures supported_features: Describes the additional service features - that are supported with the model. - :attr str description: A brief description of the model. + Information about an available language model. + + :param str name: The name of the model for use as an identifier in calls to the + service (for example, `en-US_BroadbandModel`). + :param str language: The language identifier of the model (for example, + `en-US`). + :param int rate: The sampling rate (minimum acceptable rate for audio) used by + the model in Hertz. + :param str url: The URI for the model. + :param SupportedFeatures supported_features: Indicates whether select service + features are supported with the model. + :param str description: A brief description of the model. """ - def __init__(self, name, language, rate, url, supported_features, - description): + def __init__( + self, + name: str, + language: str, + rate: int, + url: str, + supported_features: 'SupportedFeatures', + description: str, + ) -> None: """ Initialize a SpeechModel object. - :param str name: The name of the model for use as an identifier in calls to the - service (for example, `en-US_BroadbandModel`). - :param str language: The language identifier of the model (for example, `en-US`). - :param int rate: The sampling rate (minimum acceptable rate for audio) used by the - model in Hertz. + :param str name: The name of the model for use as an identifier in calls to + the service (for example, `en-US_BroadbandModel`). + :param str language: The language identifier of the model (for example, + `en-US`). + :param int rate: The sampling rate (minimum acceptable rate for audio) used + by the model in Hertz. :param str url: The URI for the model. - :param SupportedFeatures supported_features: Describes the additional service - features that are supported with the model. + :param SupportedFeatures supported_features: Indicates whether select + service features are supported with the model. :param str description: A brief description of the model. 
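# A minimal sketch of parsing a single model description with the SpeechModel
# class defined above, assuming `model_json` is a hypothetical dict describing
# one model (for example, the body returned when listing or getting a model);
# not part of the diff itself.
model = SpeechModel.from_dict(model_json)
print(model.name, model.language, model.rate, model.url)
# from_dict converts the nested 'supported_features' dict into a
# SupportedFeatures object, so its fields can be read directly.
print(model.supported_features.speaker_labels)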
""" self.name = name @@ -4664,46 +8660,51 @@ def __init__(self, name, language, rate, url, supported_features, self.description = description @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SpeechModel': """Initialize a SpeechModel object from a json dictionary.""" args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') + if (name := _dict.get('name')) is not None: + args['name'] = name else: raise ValueError( 'Required property \'name\' not present in SpeechModel JSON') - if 'language' in _dict: - args['language'] = _dict.get('language') + if (language := _dict.get('language')) is not None: + args['language'] = language else: raise ValueError( 'Required property \'language\' not present in SpeechModel JSON' ) - if 'rate' in _dict: - args['rate'] = _dict.get('rate') + if (rate := _dict.get('rate')) is not None: + args['rate'] = rate else: raise ValueError( 'Required property \'rate\' not present in SpeechModel JSON') - if 'url' in _dict: - args['url'] = _dict.get('url') + if (url := _dict.get('url')) is not None: + args['url'] = url else: raise ValueError( 'Required property \'url\' not present in SpeechModel JSON') - if 'supported_features' in _dict: - args['supported_features'] = SupportedFeatures._from_dict( - _dict.get('supported_features')) + if (supported_features := _dict.get('supported_features')) is not None: + args['supported_features'] = SupportedFeatures.from_dict( + supported_features) else: raise ValueError( 'Required property \'supported_features\' not present in SpeechModel JSON' ) - if 'description' in _dict: - args['description'] = _dict.get('description') + if (description := _dict.get('description')) is not None: + args['description'] = description else: raise ValueError( 'Required property \'description\' not present in SpeechModel JSON' ) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeechModel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'name') and self.name is not None: @@ -4717,120 +8718,148 @@ def _to_dict(self): if hasattr( self, 'supported_features') and self.supported_features is not None: - _dict['supported_features'] = self.supported_features._to_dict() + if isinstance(self.supported_features, dict): + _dict['supported_features'] = self.supported_features + else: + _dict['supported_features'] = self.supported_features.to_dict() if hasattr(self, 'description') and self.description is not None: _dict['description'] = self.description return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SpeechModel object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SpeechModel') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SpeechModel') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SpeechModels(object): +class SpeechModels: """ - SpeechModels. + Information about the available language models. 
- :attr list[SpeechModel] models: An array of `SpeechModel` objects that provides - information about each available model. + :param List[SpeechModel] models: An array of `SpeechModel` objects that provides + information about each available model. """ - def __init__(self, models): + def __init__( + self, + models: List['SpeechModel'], + ) -> None: """ Initialize a SpeechModels object. - :param list[SpeechModel] models: An array of `SpeechModel` objects that provides - information about each available model. + :param List[SpeechModel] models: An array of `SpeechModel` objects that + provides information about each available model. """ self.models = models @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SpeechModels': """Initialize a SpeechModels object from a json dictionary.""" args = {} - if 'models' in _dict: - args['models'] = [ - SpeechModel._from_dict(x) for x in (_dict.get('models')) - ] + if (models := _dict.get('models')) is not None: + args['models'] = [SpeechModel.from_dict(v) for v in models] else: raise ValueError( 'Required property \'models\' not present in SpeechModels JSON') return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeechModels object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'models') and self.models is not None: - _dict['models'] = [x._to_dict() for x in self.models] + models_list = [] + for v in self.models: + if isinstance(v, dict): + models_list.append(v) + else: + models_list.append(v.to_dict()) + _dict['models'] = models_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SpeechModels object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SpeechModels') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SpeechModels') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SpeechRecognitionAlternative(object): +class SpeechRecognitionAlternative: """ - SpeechRecognitionAlternative. - - :attr str transcript: A transcription of the audio. - :attr float confidence: (optional) A score that indicates the service's confidence in - the transcript in the range of 0.0 to 1.0. A confidence score is returned only for the - best alternative and only with results marked as final. - :attr list[str] timestamps: (optional) Time alignments for each word from the - transcript as a list of lists. Each inner list consists of three elements: the word - followed by its start and end time in seconds, for example: - `[["hello",0.0,1.2],["world",1.2,2.5]]`. Timestamps are returned only for the best - alternative. - :attr list[str] word_confidence: (optional) A confidence score for each word of the - transcript as a list of lists. Each inner list consists of two elements: the word and - its confidence score in the range of 0.0 to 1.0, for example: - `[["hello",0.95],["world",0.866]]`. Confidence scores are returned only for the best - alternative and only with results marked as final. 
+ An alternative transcript from speech recognition results. + + :param str transcript: A transcription of the audio. + :param float confidence: (optional) A score that indicates the service's + confidence in the transcript in the range of 0.0 to 1.0. The service returns a + confidence score only for the best alternative and only with results marked as + final. + :param List[str] timestamps: (optional) Time alignments for each word from the + transcript as a list of lists. Each inner list consists of three elements: the + word followed by its start and end time in seconds, for example: + `[["hello",0.0,1.2],["world",1.2,2.5]]`. Timestamps are returned only for the + best alternative. + :param List[str] word_confidence: (optional) A confidence score for each word of + the transcript as a list of lists. Each inner list consists of two elements: the + word and its confidence score in the range of 0.0 to 1.0, for example: + `[["hello",0.95],["world",0.86]]`. Confidence scores are returned only for the + best alternative and only with results marked as final. """ - def __init__(self, - transcript, - confidence=None, - timestamps=None, - word_confidence=None): + def __init__( + self, + transcript: str, + *, + confidence: Optional[float] = None, + timestamps: Optional[List[str]] = None, + word_confidence: Optional[List[str]] = None, + ) -> None: """ Initialize a SpeechRecognitionAlternative object. :param str transcript: A transcription of the audio. :param float confidence: (optional) A score that indicates the service's - confidence in the transcript in the range of 0.0 to 1.0. A confidence score is - returned only for the best alternative and only with results marked as final. - :param list[str] timestamps: (optional) Time alignments for each word from the - transcript as a list of lists. Each inner list consists of three elements: the - word followed by its start and end time in seconds, for example: - `[["hello",0.0,1.2],["world",1.2,2.5]]`. Timestamps are returned only for the best - alternative. - :param list[str] word_confidence: (optional) A confidence score for each word of - the transcript as a list of lists. Each inner list consists of two elements: the - word and its confidence score in the range of 0.0 to 1.0, for example: - `[["hello",0.95],["world",0.866]]`. Confidence scores are returned only for the - best alternative and only with results marked as final. + confidence in the transcript in the range of 0.0 to 1.0. The service + returns a confidence score only for the best alternative and only with + results marked as final. + :param List[str] timestamps: (optional) Time alignments for each word from + the transcript as a list of lists. Each inner list consists of three + elements: the word followed by its start and end time in seconds, for + example: `[["hello",0.0,1.2],["world",1.2,2.5]]`. Timestamps are returned + only for the best alternative. + :param List[str] word_confidence: (optional) A confidence score for each + word of the transcript as a list of lists. Each inner list consists of two + elements: the word and its confidence score in the range of 0.0 to 1.0, for + example: `[["hello",0.95],["world",0.86]]`. Confidence scores are returned + only for the best alternative and only with results marked as final. 
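# A minimal sketch of reading per-word timings from a SpeechRecognitionAlternative,
# assuming `alt_json` is a hypothetical alternative dict from a recognition
# response requested with `timestamps=True`; not part of the diff itself.
alt = SpeechRecognitionAlternative.from_dict(alt_json)
# Each timestamps entry has the form ["word", start_seconds, end_seconds].
for word, start, end in alt.timestamps or []:
    print(f'{word}: {start}s - {end}s')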
""" self.transcript = transcript self.confidence = confidence @@ -4838,24 +8867,29 @@ def __init__(self, self.word_confidence = word_confidence @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SpeechRecognitionAlternative': """Initialize a SpeechRecognitionAlternative object from a json dictionary.""" args = {} - if 'transcript' in _dict: - args['transcript'] = _dict.get('transcript') + if (transcript := _dict.get('transcript')) is not None: + args['transcript'] = transcript else: raise ValueError( 'Required property \'transcript\' not present in SpeechRecognitionAlternative JSON' ) - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') - if 'timestamps' in _dict: - args['timestamps'] = _dict.get('timestamps') - if 'word_confidence' in _dict: - args['word_confidence'] = _dict.get('word_confidence') + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence + if (timestamps := _dict.get('timestamps')) is not None: + args['timestamps'] = timestamps + if (word_confidence := _dict.get('word_confidence')) is not None: + args['word_confidence'] = word_confidence return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeechRecognitionAlternative object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'transcript') and self.transcript is not None: @@ -4869,388 +8903,851 @@ def _to_dict(self): _dict['word_confidence'] = self.word_confidence return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SpeechRecognitionAlternative object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SpeechRecognitionAlternative') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SpeechRecognitionAlternative') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SpeechRecognitionResult(object): +class SpeechRecognitionResult: """ - SpeechRecognitionResult. - - :attr bool final_results: An indication of whether the transcription results are - final. If `true`, the results for this utterance are not updated further; no - additional results are sent for a `result_index` once its results are indicated as - final. - :attr list[SpeechRecognitionAlternative] alternatives: An array of alternative - transcripts. The `alternatives` array can include additional requested output such as - word confidence or timestamps. - :attr dict keywords_result: (optional) A dictionary (or associative array) whose keys - are the strings specified for `keywords` if both that parameter and - `keywords_threshold` are specified. The value for each key is an array of matches - spotted in the audio for that keyword. Each match is described by a `KeywordResult` - object. A keyword for which no matches are found is omitted from the dictionary. The - dictionary is omitted entirely if no matches are found for any keywords. 
- :attr list[WordAlternativeResults] word_alternatives: (optional) An array of - alternative hypotheses found for words of the input audio if a - `word_alternatives_threshold` is specified. + Component results for a speech recognition request. + + :param bool final: An indication of whether the transcription results are final: + * If `true`, the results for this utterance are final. They are guaranteed not + to be updated further. + * If `false`, the results are interim. They can be updated with further interim + results until final results are eventually sent. + **Note:** Because `final` is a reserved word in Java, the field is renamed + `xFinal` in Java. + :param List[SpeechRecognitionAlternative] alternatives: An array of alternative + transcripts. The `alternatives` array can include additional requested output + such as word confidence or timestamps. + :param dict keywords_result: (optional) A dictionary (or associative array) + whose keys are the strings specified for `keywords` if both that parameter and + `keywords_threshold` are specified. The value for each key is an array of + matches spotted in the audio for that keyword. Each match is described by a + `KeywordResult` object. A keyword for which no matches are found is omitted from + the dictionary. The dictionary is omitted entirely if no matches are found for + any keywords. + :param List[WordAlternativeResults] word_alternatives: (optional) An array of + alternative hypotheses found for words of the input audio if a + `word_alternatives_threshold` is specified. + :param str end_of_utterance: (optional) If the `split_transcript_at_phrase_end` + parameter is `true`, describes the reason for the split: + * `end_of_data` - The end of the input audio stream. + * `full_stop` - A full semantic stop, such as for the conclusion of a + grammatical sentence. The insertion of splits is influenced by the base language + model and biased by custom language models and grammars. + * `reset` - The amount of audio that is currently being processed exceeds the + two-minute maximum. The service splits the transcript to avoid excessive memory + use. + * `silence` - A pause or silence that is at least as long as the pause interval. """ - def __init__(self, - final_results, - alternatives, - keywords_result=None, - word_alternatives=None): + def __init__( + self, + final: bool, + alternatives: List['SpeechRecognitionAlternative'], + *, + keywords_result: Optional[dict] = None, + word_alternatives: Optional[List['WordAlternativeResults']] = None, + end_of_utterance: Optional[str] = None, + ) -> None: """ Initialize a SpeechRecognitionResult object. - :param bool final_results: An indication of whether the transcription results are - final. If `true`, the results for this utterance are not updated further; no - additional results are sent for a `result_index` once its results are indicated as - final. - :param list[SpeechRecognitionAlternative] alternatives: An array of alternative - transcripts. The `alternatives` array can include additional requested output such - as word confidence or timestamps. - :param dict keywords_result: (optional) A dictionary (or associative array) whose - keys are the strings specified for `keywords` if both that parameter and - `keywords_threshold` are specified. The value for each key is an array of matches - spotted in the audio for that keyword. Each match is described by a - `KeywordResult` object. A keyword for which no matches are found is omitted from - the dictionary. 
The dictionary is omitted entirely if no matches are found for any - keywords. - :param list[WordAlternativeResults] word_alternatives: (optional) An array of - alternative hypotheses found for words of the input audio if a - `word_alternatives_threshold` is specified. - """ - self.final_results = final_results + :param bool final: An indication of whether the transcription results are + final: + * If `true`, the results for this utterance are final. They are guaranteed + not to be updated further. + * If `false`, the results are interim. They can be updated with further + interim results until final results are eventually sent. + **Note:** Because `final` is a reserved word in Java, the field is renamed + `xFinal` in Java. + :param List[SpeechRecognitionAlternative] alternatives: An array of + alternative transcripts. The `alternatives` array can include additional + requested output such as word confidence or timestamps. + :param dict keywords_result: (optional) A dictionary (or associative array) + whose keys are the strings specified for `keywords` if both that parameter + and `keywords_threshold` are specified. The value for each key is an array + of matches spotted in the audio for that keyword. Each match is described + by a `KeywordResult` object. A keyword for which no matches are found is + omitted from the dictionary. The dictionary is omitted entirely if no + matches are found for any keywords. + :param List[WordAlternativeResults] word_alternatives: (optional) An array + of alternative hypotheses found for words of the input audio if a + `word_alternatives_threshold` is specified. + :param str end_of_utterance: (optional) If the + `split_transcript_at_phrase_end` parameter is `true`, describes the reason + for the split: + * `end_of_data` - The end of the input audio stream. + * `full_stop` - A full semantic stop, such as for the conclusion of a + grammatical sentence. The insertion of splits is influenced by the base + language model and biased by custom language models and grammars. + * `reset` - The amount of audio that is currently being processed exceeds + the two-minute maximum. The service splits the transcript to avoid + excessive memory use. + * `silence` - A pause or silence that is at least as long as the pause + interval. 
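# A minimal sketch of inspecting why a transcript was split, assuming `result`
# is a hypothetical SpeechRecognitionResult parsed from a response to a request
# made with `split_transcript_at_phrase_end=True`; not part of the diff itself.
if result.final and (result.end_of_utterance ==
                     SpeechRecognitionResult.EndOfUtteranceEnum.FULL_STOP):
    print('Split at a full semantic stop:', result.alternatives[0].transcript)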
+ """ + self.final = final self.alternatives = alternatives self.keywords_result = keywords_result self.word_alternatives = word_alternatives + self.end_of_utterance = end_of_utterance @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SpeechRecognitionResult': """Initialize a SpeechRecognitionResult object from a json dictionary.""" args = {} - if 'final' in _dict or 'final_results' in _dict: - args['final_results'] = _dict.get('final') or _dict.get( - 'final_results') + if (final := _dict.get('final')) is not None: + args['final'] = final else: raise ValueError( 'Required property \'final\' not present in SpeechRecognitionResult JSON' ) - if 'alternatives' in _dict: + if (alternatives := _dict.get('alternatives')) is not None: args['alternatives'] = [ - SpeechRecognitionAlternative._from_dict(x) - for x in (_dict.get('alternatives')) + SpeechRecognitionAlternative.from_dict(v) for v in alternatives ] else: raise ValueError( 'Required property \'alternatives\' not present in SpeechRecognitionResult JSON' ) - if 'keywords_result' in _dict: - args['keywords_result'] = _dict.get('keywords_result') - if 'word_alternatives' in _dict: + if (keywords_result := _dict.get('keywords_result')) is not None: + args['keywords_result'] = keywords_result + if (word_alternatives := _dict.get('word_alternatives')) is not None: args['word_alternatives'] = [ - WordAlternativeResults._from_dict(x) - for x in (_dict.get('word_alternatives')) + WordAlternativeResults.from_dict(v) for v in word_alternatives ] + if (end_of_utterance := _dict.get('end_of_utterance')) is not None: + args['end_of_utterance'] = end_of_utterance return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeechRecognitionResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'final_results') and self.final_results is not None: - _dict['final'] = self.final_results + if hasattr(self, 'final') and self.final is not None: + _dict['final'] = self.final if hasattr(self, 'alternatives') and self.alternatives is not None: - _dict['alternatives'] = [x._to_dict() for x in self.alternatives] + alternatives_list = [] + for v in self.alternatives: + if isinstance(v, dict): + alternatives_list.append(v) + else: + alternatives_list.append(v.to_dict()) + _dict['alternatives'] = alternatives_list if hasattr(self, 'keywords_result') and self.keywords_result is not None: _dict['keywords_result'] = self.keywords_result if hasattr(self, 'word_alternatives') and self.word_alternatives is not None: - _dict['word_alternatives'] = [ - x._to_dict() for x in self.word_alternatives - ] + word_alternatives_list = [] + for v in self.word_alternatives: + if isinstance(v, dict): + word_alternatives_list.append(v) + else: + word_alternatives_list.append(v.to_dict()) + _dict['word_alternatives'] = word_alternatives_list + if hasattr(self, + 'end_of_utterance') and self.end_of_utterance is not None: + _dict['end_of_utterance'] = self.end_of_utterance return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SpeechRecognitionResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SpeechRecognitionResult') -> bool: 
"""Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SpeechRecognitionResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class EndOfUtteranceEnum(str, Enum): + """ + If the `split_transcript_at_phrase_end` parameter is `true`, describes the reason + for the split: + * `end_of_data` - The end of the input audio stream. + * `full_stop` - A full semantic stop, such as for the conclusion of a grammatical + sentence. The insertion of splits is influenced by the base language model and + biased by custom language models and grammars. + * `reset` - The amount of audio that is currently being processed exceeds the + two-minute maximum. The service splits the transcript to avoid excessive memory + use. + * `silence` - A pause or silence that is at least as long as the pause interval. + """ + + END_OF_DATA = 'end_of_data' + FULL_STOP = 'full_stop' + RESET = 'reset' + SILENCE = 'silence' + -class SpeechRecognitionResults(object): +class SpeechRecognitionResults: """ - SpeechRecognitionResults. - - :attr list[SpeechRecognitionResult] results: (optional) An array of - `SpeechRecognitionResult` objects that can include interim and final results (interim - results are returned only if supported by the method). Final results are guaranteed - not to change; interim results might be replaced by further interim results and final - results. The service periodically sends updates to the results list; the - `result_index` is set to the lowest index in the array that has changed; it is - incremented for new results. - :attr int result_index: (optional) An index that indicates a change point in the - `results` array. The service increments the index only for additional results that it - sends for new audio for the same request. - :attr list[SpeakerLabelsResult] speaker_labels: (optional) An array of - `SpeakerLabelsResult` objects that identifies which words were spoken by which - speakers in a multi-person exchange. The array is returned only if the - `speaker_labels` parameter is `true`. When interim results are also requested for - methods that support them, it is possible for a `SpeechRecognitionResults` object to - include only the `speaker_labels` field. - :attr list[str] warnings: (optional) An array of warning messages associated with the - request: - * Warnings for invalid parameters or fields can include a descriptive message and a - list of invalid argument strings, for example, `"Unknown arguments:"` or `"Unknown url - query arguments:"` followed by a list of the form `"{invalid_arg_1}, - {invalid_arg_2}."` - * The following warning is returned if the request passes a custom model that is based - on an older version of a base model for which an updated version is available: `"Using - previous version of base model, because your custom model has been built with it. - Please note that this version will be supported only for a limited time. Consider - updating your custom model to the new base model. If you do not do that you will be - automatically switched to base model when you used the non-updated custom model."` - In both cases, the request succeeds despite the warnings. + The complete results for a speech recognition request. 
+ + :param List[SpeechRecognitionResult] results: (optional) An array of + `SpeechRecognitionResult` objects that can include interim and final results + (interim results are returned only if supported by the method). Final results + are guaranteed not to change; interim results might be replaced by further + interim results and eventually final results. + For the HTTP interfaces, all results arrive at the same time. For the WebSocket + interface, results can be sent as multiple separate responses. The service + periodically sends updates to the results list. The `result_index` is + incremented to the lowest index in the array that has changed for new results. + For more information, see [Understanding speech recognition + results](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-basic-response). + :param int result_index: (optional) An index that indicates a change point in + the `results` array. The service increments the index for additional results + that it sends for new audio for the same request. All results with the same + index are delivered at the same time. The same index can include multiple final + results that are delivered with the same response. + :param List[SpeakerLabelsResult] speaker_labels: (optional) An array of + `SpeakerLabelsResult` objects that identifies which words were spoken by which + speakers in a multi-person exchange. The array is returned only if the + `speaker_labels` parameter is `true`. When interim results are also requested + for methods that support them, it is possible for a `SpeechRecognitionResults` + object to include only the `speaker_labels` field. + :param ProcessingMetrics processing_metrics: (optional) If processing metrics + are requested, information about the service's processing of the input audio. + Processing metrics are not available with the synchronous [Recognize + audio](#recognize) method. + :param AudioMetrics audio_metrics: (optional) If audio metrics are requested, + information about the signal characteristics of the input audio. + :param List[str] warnings: (optional) An array of warning messages associated + with the request: + * Warnings for invalid parameters or fields can include a descriptive message + and a list of invalid argument strings, for example, `"Unknown arguments:"` or + `"Unknown url query arguments:"` followed by a list of the form + `"{invalid_arg_1}, {invalid_arg_2}."` (If you use the `character_insertion_bias` + parameter with a previous-generation model, the warning message refers to the + parameter as `lambdaBias`.) + * The following warning is returned if the request passes a custom model that is + based on an older version of a base model for which an updated version is + available: `"Using previous version of base model, because your custom model has + been built with it. Please note that this version will be supported only for a + limited time. Consider updating your custom model to the new base model. If you + do not do that you will be automatically switched to base model when you used + the non-updated custom model."` + In both cases, the request succeeds despite the warnings. + :param EnrichedResults enriched_results: (optional) If enriched results are + requested, transcription with inserted punctuation marks such as periods, + commas, question marks, and exclamation points. 
""" - def __init__(self, - results=None, - result_index=None, - speaker_labels=None, - warnings=None): + def __init__( + self, + *, + results: Optional[List['SpeechRecognitionResult']] = None, + result_index: Optional[int] = None, + speaker_labels: Optional[List['SpeakerLabelsResult']] = None, + processing_metrics: Optional['ProcessingMetrics'] = None, + audio_metrics: Optional['AudioMetrics'] = None, + warnings: Optional[List[str]] = None, + enriched_results: Optional['EnrichedResults'] = None, + ) -> None: """ Initialize a SpeechRecognitionResults object. - :param list[SpeechRecognitionResult] results: (optional) An array of - `SpeechRecognitionResult` objects that can include interim and final results - (interim results are returned only if supported by the method). Final results are - guaranteed not to change; interim results might be replaced by further interim - results and final results. The service periodically sends updates to the results - list; the `result_index` is set to the lowest index in the array that has changed; - it is incremented for new results. - :param int result_index: (optional) An index that indicates a change point in the - `results` array. The service increments the index only for additional results that - it sends for new audio for the same request. - :param list[SpeakerLabelsResult] speaker_labels: (optional) An array of - `SpeakerLabelsResult` objects that identifies which words were spoken by which - speakers in a multi-person exchange. The array is returned only if the - `speaker_labels` parameter is `true`. When interim results are also requested for - methods that support them, it is possible for a `SpeechRecognitionResults` object - to include only the `speaker_labels` field. - :param list[str] warnings: (optional) An array of warning messages associated with - the request: - * Warnings for invalid parameters or fields can include a descriptive message and - a list of invalid argument strings, for example, `"Unknown arguments:"` or - `"Unknown url query arguments:"` followed by a list of the form `"{invalid_arg_1}, - {invalid_arg_2}."` - * The following warning is returned if the request passes a custom model that is - based on an older version of a base model for which an updated version is - available: `"Using previous version of base model, because your custom model has - been built with it. Please note that this version will be supported only for a - limited time. Consider updating your custom model to the new base model. If you do - not do that you will be automatically switched to base model when you used the - non-updated custom model."` - In both cases, the request succeeds despite the warnings. + :param List[SpeechRecognitionResult] results: (optional) An array of + `SpeechRecognitionResult` objects that can include interim and final + results (interim results are returned only if supported by the method). + Final results are guaranteed not to change; interim results might be + replaced by further interim results and eventually final results. + For the HTTP interfaces, all results arrive at the same time. For the + WebSocket interface, results can be sent as multiple separate responses. + The service periodically sends updates to the results list. The + `result_index` is incremented to the lowest index in the array that has + changed for new results. + For more information, see [Understanding speech recognition + results](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-basic-response). 
+ :param int result_index: (optional) An index that indicates a change point + in the `results` array. The service increments the index for additional + results that it sends for new audio for the same request. All results with + the same index are delivered at the same time. The same index can include + multiple final results that are delivered with the same response. + :param List[SpeakerLabelsResult] speaker_labels: (optional) An array of + `SpeakerLabelsResult` objects that identifies which words were spoken by + which speakers in a multi-person exchange. The array is returned only if + the `speaker_labels` parameter is `true`. When interim results are also + requested for methods that support them, it is possible for a + `SpeechRecognitionResults` object to include only the `speaker_labels` + field. + :param ProcessingMetrics processing_metrics: (optional) If processing + metrics are requested, information about the service's processing of the + input audio. Processing metrics are not available with the synchronous + [Recognize audio](#recognize) method. + :param AudioMetrics audio_metrics: (optional) If audio metrics are + requested, information about the signal characteristics of the input audio. + :param List[str] warnings: (optional) An array of warning messages + associated with the request: + * Warnings for invalid parameters or fields can include a descriptive + message and a list of invalid argument strings, for example, `"Unknown + arguments:"` or `"Unknown url query arguments:"` followed by a list of the + form `"{invalid_arg_1}, {invalid_arg_2}."` (If you use the + `character_insertion_bias` parameter with a previous-generation model, the + warning message refers to the parameter as `lambdaBias`.) + * The following warning is returned if the request passes a custom model + that is based on an older version of a base model for which an updated + version is available: `"Using previous version of base model, because your + custom model has been built with it. Please note that this version will be + supported only for a limited time. Consider updating your custom model to + the new base model. If you do not do that you will be automatically + switched to base model when you used the non-updated custom model."` + In both cases, the request succeeds despite the warnings. + :param EnrichedResults enriched_results: (optional) If enriched results are + requested, transcription with inserted punctuation marks such as periods, + commas, question marks, and exclamation points. 
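# A minimal sketch of collecting final transcripts from a complete response with
# the SpeechRecognitionResults model, assuming `response_json` is a hypothetical
# parsed JSON body from a recognition request; not part of the diff itself.
results = SpeechRecognitionResults.from_dict(response_json)
transcripts = [
    r.alternatives[0].transcript
    for r in (results.results or [])
    if r.final
]
print(' '.join(transcripts))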
""" self.results = results self.result_index = result_index self.speaker_labels = speaker_labels + self.processing_metrics = processing_metrics + self.audio_metrics = audio_metrics self.warnings = warnings + self.enriched_results = enriched_results @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SpeechRecognitionResults': """Initialize a SpeechRecognitionResults object from a json dictionary.""" args = {} - if 'results' in _dict: + if (results := _dict.get('results')) is not None: args['results'] = [ - SpeechRecognitionResult._from_dict(x) - for x in (_dict.get('results')) + SpeechRecognitionResult.from_dict(v) for v in results ] - if 'result_index' in _dict: - args['result_index'] = _dict.get('result_index') - if 'speaker_labels' in _dict: + if (result_index := _dict.get('result_index')) is not None: + args['result_index'] = result_index + if (speaker_labels := _dict.get('speaker_labels')) is not None: args['speaker_labels'] = [ - SpeakerLabelsResult._from_dict(x) - for x in (_dict.get('speaker_labels')) + SpeakerLabelsResult.from_dict(v) for v in speaker_labels ] - if 'warnings' in _dict: - args['warnings'] = _dict.get('warnings') + if (processing_metrics := _dict.get('processing_metrics')) is not None: + args['processing_metrics'] = ProcessingMetrics.from_dict( + processing_metrics) + if (audio_metrics := _dict.get('audio_metrics')) is not None: + args['audio_metrics'] = AudioMetrics.from_dict(audio_metrics) + if (warnings := _dict.get('warnings')) is not None: + args['warnings'] = warnings + if (enriched_results := _dict.get('enriched_results')) is not None: + args['enriched_results'] = EnrichedResults.from_dict( + enriched_results) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeechRecognitionResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'results') and self.results is not None: - _dict['results'] = [x._to_dict() for x in self.results] + results_list = [] + for v in self.results: + if isinstance(v, dict): + results_list.append(v) + else: + results_list.append(v.to_dict()) + _dict['results'] = results_list if hasattr(self, 'result_index') and self.result_index is not None: _dict['result_index'] = self.result_index if hasattr(self, 'speaker_labels') and self.speaker_labels is not None: - _dict['speaker_labels'] = [ - x._to_dict() for x in self.speaker_labels - ] + speaker_labels_list = [] + for v in self.speaker_labels: + if isinstance(v, dict): + speaker_labels_list.append(v) + else: + speaker_labels_list.append(v.to_dict()) + _dict['speaker_labels'] = speaker_labels_list + if hasattr( + self, + 'processing_metrics') and self.processing_metrics is not None: + if isinstance(self.processing_metrics, dict): + _dict['processing_metrics'] = self.processing_metrics + else: + _dict['processing_metrics'] = self.processing_metrics.to_dict() + if hasattr(self, 'audio_metrics') and self.audio_metrics is not None: + if isinstance(self.audio_metrics, dict): + _dict['audio_metrics'] = self.audio_metrics + else: + _dict['audio_metrics'] = self.audio_metrics.to_dict() if hasattr(self, 'warnings') and self.warnings is not None: _dict['warnings'] = self.warnings + if hasattr(self, + 'enriched_results') and self.enriched_results is not None: + if isinstance(self.enriched_results, dict): + _dict['enriched_results'] = self.enriched_results + else: + _dict['enriched_results'] = 
self.enriched_results.to_dict() return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SpeechRecognitionResults object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SpeechRecognitionResults') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SpeechRecognitionResults') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SupportedFeatures(object): +class SupportedFeatures: """ - Describes the additional service features that are supported with the model. - - :attr bool custom_language_model: Indicates whether the customization interface can be - used to create a custom language model based on the language model. - :attr bool speaker_labels: Indicates whether the `speaker_labels` parameter can be - used with the language model. + Indicates whether select service features are supported with the model. + + :param bool custom_language_model: Indicates whether the customization interface + can be used to create a custom language model based on the language model. + :param bool custom_acoustic_model: Indicates whether the customization interface + can be used to create a custom acoustic model based on the language model. + :param bool speaker_labels: Indicates whether the `speaker_labels` parameter can + be used with the language model. + **Note:** The field returns `true` for all models. However, speaker labels are + supported for use only with the following languages and models: + * _For previous-generation models,_ the parameter can be used with Australian + English, US English, German, Japanese, Korean, and Spanish (both broadband and + narrowband models) and UK English (narrowband model) transcription only. + * _For next-generation models,_ the parameter can be used with Czech, English + (Australian, Indian, UK, and US), German, Japanese, Korean, and Spanish + transcription only. + Speaker labels are not supported for use with any other languages or models. + :param bool low_latency: (optional) Indicates whether the `low_latency` + parameter can be used with a next-generation language model. The field is + returned only for next-generation models. Previous-generation models do not + support the `low_latency` parameter. """ - def __init__(self, custom_language_model, speaker_labels): + def __init__( + self, + custom_language_model: bool, + custom_acoustic_model: bool, + speaker_labels: bool, + *, + low_latency: Optional[bool] = None, + ) -> None: """ Initialize a SupportedFeatures object. - :param bool custom_language_model: Indicates whether the customization interface - can be used to create a custom language model based on the language model. - :param bool speaker_labels: Indicates whether the `speaker_labels` parameter can - be used with the language model. + :param bool custom_language_model: Indicates whether the customization + interface can be used to create a custom language model based on the + language model. + :param bool custom_acoustic_model: Indicates whether the customization + interface can be used to create a custom acoustic model based on the + language model. 
+ :param bool speaker_labels: Indicates whether the `speaker_labels` + parameter can be used with the language model. + **Note:** The field returns `true` for all models. However, speaker labels + are supported for use only with the following languages and models: + * _For previous-generation models,_ the parameter can be used with + Australian English, US English, German, Japanese, Korean, and Spanish (both + broadband and narrowband models) and UK English (narrowband model) + transcription only. + * _For next-generation models,_ the parameter can be used with Czech, + English (Australian, Indian, UK, and US), German, Japanese, Korean, and + Spanish transcription only. + Speaker labels are not supported for use with any other languages or + models. + :param bool low_latency: (optional) Indicates whether the `low_latency` + parameter can be used with a next-generation language model. The field is + returned only for next-generation models. Previous-generation models do not + support the `low_latency` parameter. """ self.custom_language_model = custom_language_model + self.custom_acoustic_model = custom_acoustic_model self.speaker_labels = speaker_labels + self.low_latency = low_latency @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SupportedFeatures': """Initialize a SupportedFeatures object from a json dictionary.""" args = {} - if 'custom_language_model' in _dict: - args['custom_language_model'] = _dict.get('custom_language_model') + if (custom_language_model := + _dict.get('custom_language_model')) is not None: + args['custom_language_model'] = custom_language_model else: raise ValueError( 'Required property \'custom_language_model\' not present in SupportedFeatures JSON' ) - if 'speaker_labels' in _dict: - args['speaker_labels'] = _dict.get('speaker_labels') + if (custom_acoustic_model := + _dict.get('custom_acoustic_model')) is not None: + args['custom_acoustic_model'] = custom_acoustic_model + else: + raise ValueError( + 'Required property \'custom_acoustic_model\' not present in SupportedFeatures JSON' + ) + if (speaker_labels := _dict.get('speaker_labels')) is not None: + args['speaker_labels'] = speaker_labels else: raise ValueError( 'Required property \'speaker_labels\' not present in SupportedFeatures JSON' ) + if (low_latency := _dict.get('low_latency')) is not None: + args['low_latency'] = low_latency return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SupportedFeatures object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'custom_language_model' ) and self.custom_language_model is not None: _dict['custom_language_model'] = self.custom_language_model + if hasattr(self, 'custom_acoustic_model' + ) and self.custom_acoustic_model is not None: + _dict['custom_acoustic_model'] = self.custom_acoustic_model if hasattr(self, 'speaker_labels') and self.speaker_labels is not None: _dict['speaker_labels'] = self.speaker_labels + if hasattr(self, 'low_latency') and self.low_latency is not None: + _dict['low_latency'] = self.low_latency return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SupportedFeatures object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + 
def __eq__(self, other: 'SupportedFeatures') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SupportedFeatures') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Word(object): +class TrainingResponse: """ - Word. - - :attr str word: A word from the custom model's words resource. The spelling of the - word is used to train the model. - :attr list[str] sounds_like: An array of pronunciations for the word. The array can - include the sounds-like pronunciation automatically generated by the service if none - is provided for the word; the service adds this pronunciation when it finishes - processing the word. - :attr str display_as: The spelling of the word that the service uses to display the - word in a transcript. The field contains an empty string if no display-as value is - provided for the word, in which case the word is displayed as it is spelled. - :attr int count: A sum of the number of times the word is found across all corpora. - For example, if the word occurs five times in one corpus and seven times in another, - its count is `12`. If you add a custom word to a model before it is added by any - corpora, the count begins at `1`; if the word is added from a corpus first and later - modified, the count reflects only the number of times it is found in corpora. - :attr list[str] source: An array of sources that describes how the word was added to - the custom model's words resource. For OOV words added from a corpus, includes the - name of the corpus; if the word was added by multiple corpora, the names of all - corpora are listed. If the word was modified or added by the user directly, the field - includes the string `user`. - :attr list[WordError] error: (optional) If the service discovered one or more problems - that you need to correct for the word's definition, an array that describes each of - the errors. + The response from training of a custom language or custom acoustic model. + + :param List[TrainingWarning] warnings: (optional) An array of `TrainingWarning` + objects that lists any invalid resources contained in the custom model. For + custom language models, invalid resources are grouped and identified by type of + resource. The method can return warnings only if the `strict` parameter is set + to `false`. + """ + + def __init__( + self, + *, + warnings: Optional[List['TrainingWarning']] = None, + ) -> None: + """ + Initialize a TrainingResponse object. + + :param List[TrainingWarning] warnings: (optional) An array of + `TrainingWarning` objects that lists any invalid resources contained in the + custom model. For custom language models, invalid resources are grouped and + identified by type of resource. The method can return warnings only if the + `strict` parameter is set to `false`. 
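# A minimal sketch of surfacing training warnings with the TrainingResponse
# model defined below, assuming `training_json` is a hypothetical dict returned
# when training a custom model with `strict=False`; not part of the diff itself.
training = TrainingResponse.from_dict(training_json)
# Warnings, if any, are parsed into TrainingWarning objects with code and message.
for warning in training.warnings or []:
    print(warning.code, '-', warning.message)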
+ """ + self.warnings = warnings + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TrainingResponse': + """Initialize a TrainingResponse object from a json dictionary.""" + args = {} + if (warnings := _dict.get('warnings')) is not None: + args['warnings'] = [TrainingWarning.from_dict(v) for v in warnings] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TrainingResponse object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'warnings') and self.warnings is not None: + warnings_list = [] + for v in self.warnings: + if isinstance(v, dict): + warnings_list.append(v) + else: + warnings_list.append(v.to_dict()) + _dict['warnings'] = warnings_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TrainingResponse object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TrainingResponse') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TrainingResponse') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class TrainingWarning: + """ + A warning from training of a custom language or custom acoustic model. + + :param str code: An identifier for the type of invalid resources listed in the + `description` field. + :param str message: A warning message that lists the invalid resources that are + excluded from the custom model's training. The message has the following format: + `Analysis of the following {resource_type} has not completed successfully: + [{resource_names}]. They will be excluded from custom {model_type} model + training.`. + """ + + def __init__( + self, + code: str, + message: str, + ) -> None: + """ + Initialize a TrainingWarning object. + + :param str code: An identifier for the type of invalid resources listed in + the `description` field. + :param str message: A warning message that lists the invalid resources that + are excluded from the custom model's training. The message has the + following format: `Analysis of the following {resource_type} has not + completed successfully: [{resource_names}]. They will be excluded from + custom {model_type} model training.`. 
+ """ + self.code = code + self.message = message + + @classmethod + def from_dict(cls, _dict: Dict) -> 'TrainingWarning': + """Initialize a TrainingWarning object from a json dictionary.""" + args = {} + if (code := _dict.get('code')) is not None: + args['code'] = code + else: + raise ValueError( + 'Required property \'code\' not present in TrainingWarning JSON' + ) + if (message := _dict.get('message')) is not None: + args['message'] = message + else: + raise ValueError( + 'Required property \'message\' not present in TrainingWarning JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a TrainingWarning object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'code') and self.code is not None: + _dict['code'] = self.code + if hasattr(self, 'message') and self.message is not None: + _dict['message'] = self.message + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this TrainingWarning object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'TrainingWarning') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'TrainingWarning') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class CodeEnum(str, Enum): + """ + An identifier for the type of invalid resources listed in the `description` field. + """ + + INVALID_AUDIO_FILES = 'invalid_audio_files' + INVALID_CORPUS_FILES = 'invalid_corpus_files' + INVALID_GRAMMAR_FILES = 'invalid_grammar_files' + INVALID_WORDS = 'invalid_words' + + +class Word: + """ + Information about a word from a custom language model. + + :param str word: A word from the custom model's words resource. The spelling of + the word is used to train the model. + :param List[str] mapping_only: (optional) (Optional) Parameter for custom words. + You can use the 'mapping_only' key in custom words as a form of post processing. + A boolean value that indicates whether the added word should be used to + fine-tune the mode for selected next-gen models. This field appears in the + response body only when it's 'For a custom model that is based on a + previous-generation model', the mapping_only field is populated with the value + set by the user, but would not be used. + :param List[str] sounds_like: An array of as many as five pronunciations for the + word. + * _For a custom model that is based on a previous-generation model_, in addition + to sounds-like pronunciations that were added by a user, the array can include a + sounds-like pronunciation that is automatically generated by the service if none + is provided when the word is added to the custom model. + * _For a custom model that is based on a next-generation model_, the array can + include only sounds-like pronunciations that were added by a user. + :param str display_as: The spelling of the word that the service uses to display + the word in a transcript. + * _For a custom model that is based on a previous-generation model_, the field + can contain an empty string if no display-as value is provided for a word that + exists in the service's base vocabulary. 
In this case, the word is displayed as + it is spelled. + * _For a custom model that is based on a next-generation model_, the service + uses the spelling of the word as the value of the display-as field when the word + is added to the model. + :param int count: _For a custom model that is based on a previous-generation + model_, a sum of the number of times the word is found across all corpora and + grammars. For example, if the word occurs five times in one corpus and seven + times in another, its count is `12`. If you add a custom word to a model before + it is added by any corpora or grammars, the count begins at `1`; if the word is + added from a corpus or grammar first and later modified, the count reflects only + the number of times it is found in corpora and grammars. + _For a custom model that is based on a next-generation model_, the `count` field + for any word is always `1`. + :param List[str] source: An array of sources that describes how the word was + added to the custom model's words resource. + * _For a custom model that is based on previous-generation model,_ the field + includes the name of each corpus and grammar from which the service extracted + the word. For OOV that are added by multiple corpora or grammars, the names of + all corpora and grammars are listed. If you modified or added the word directly, + the field includes the string `user`. + * _For a custom model that is based on a next-generation model,_ this field + shows only `user` for custom words that were added directly to the custom model. + Words from corpora and grammars are not added to the words resource for custom + models that are based on next-generation models. + :param List[WordError] error: (optional) If the service discovered one or more + problems that you need to correct for the word's definition, an array that + describes each of the errors. """ - def __init__(self, word, sounds_like, display_as, count, source, - error=None): + def __init__( + self, + word: str, + sounds_like: List[str], + display_as: str, + count: int, + source: List[str], + *, + mapping_only: Optional[List[str]] = None, + error: Optional[List['WordError']] = None, + ) -> None: """ Initialize a Word object. - :param str word: A word from the custom model's words resource. The spelling of - the word is used to train the model. - :param list[str] sounds_like: An array of pronunciations for the word. The array - can include the sounds-like pronunciation automatically generated by the service - if none is provided for the word; the service adds this pronunciation when it - finishes processing the word. - :param str display_as: The spelling of the word that the service uses to display - the word in a transcript. The field contains an empty string if no display-as - value is provided for the word, in which case the word is displayed as it is - spelled. - :param int count: A sum of the number of times the word is found across all - corpora. For example, if the word occurs five times in one corpus and seven times - in another, its count is `12`. If you add a custom word to a model before it is - added by any corpora, the count begins at `1`; if the word is added from a corpus - first and later modified, the count reflects only the number of times it is found - in corpora. - :param list[str] source: An array of sources that describes how the word was added - to the custom model's words resource. 
For OOV words added from a corpus, includes - the name of the corpus; if the word was added by multiple corpora, the names of - all corpora are listed. If the word was modified or added by the user directly, - the field includes the string `user`. - :param list[WordError] error: (optional) If the service discovered one or more - problems that you need to correct for the word's definition, an array that - describes each of the errors. + :param str word: A word from the custom model's words resource. The + spelling of the word is used to train the model. + :param List[str] sounds_like: An array of as many as five pronunciations + for the word. + * _For a custom model that is based on a previous-generation model_, in + addition to sounds-like pronunciations that were added by a user, the array + can include a sounds-like pronunciation that is automatically generated by + the service if none is provided when the word is added to the custom model. + * _For a custom model that is based on a next-generation model_, the array + can include only sounds-like pronunciations that were added by a user. + :param str display_as: The spelling of the word that the service uses to + display the word in a transcript. + * _For a custom model that is based on a previous-generation model_, the + field can contain an empty string if no display-as value is provided for a + word that exists in the service's base vocabulary. In this case, the word + is displayed as it is spelled. + * _For a custom model that is based on a next-generation model_, the + service uses the spelling of the word as the value of the display-as field + when the word is added to the model. + :param int count: _For a custom model that is based on a + previous-generation model_, a sum of the number of times the word is found + across all corpora and grammars. For example, if the word occurs five times + in one corpus and seven times in another, its count is `12`. If you add a + custom word to a model before it is added by any corpora or grammars, the + count begins at `1`; if the word is added from a corpus or grammar first + and later modified, the count reflects only the number of times it is found + in corpora and grammars. + _For a custom model that is based on a next-generation model_, the `count` + field for any word is always `1`. + :param List[str] source: An array of sources that describes how the word + was added to the custom model's words resource. + * _For a custom model that is based on previous-generation model,_ the + field includes the name of each corpus and grammar from which the service + extracted the word. For OOV that are added by multiple corpora or grammars, + the names of all corpora and grammars are listed. If you modified or added + the word directly, the field includes the string `user`. + * _For a custom model that is based on a next-generation model,_ this field + shows only `user` for custom words that were added directly to the custom + model. Words from corpora and grammars are not added to the words resource + for custom models that are based on next-generation models. + :param List[str] mapping_only: (optional) (Optional) Parameter for custom + words. You can use the 'mapping_only' key in custom words as a form of post + processing. A boolean value that indicates whether the added word should be + used to fine-tune the mode for selected next-gen models. 
This field appears + in the response body only when it's 'For a custom model that is based on a + previous-generation model', the mapping_only field is populated with the + value set by the user, but would not be used. + :param List[WordError] error: (optional) If the service discovered one or + more problems that you need to correct for the word's definition, an array + that describes each of the errors. """ self.word = word + self.mapping_only = mapping_only self.sounds_like = sounds_like self.display_as = display_as self.count = count @@ -5258,45 +9755,52 @@ def __init__(self, word, sounds_like, display_as, count, source, self.error = error @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'Word': """Initialize a Word object from a json dictionary.""" args = {} - if 'word' in _dict: - args['word'] = _dict.get('word') + if (word := _dict.get('word')) is not None: + args['word'] = word else: raise ValueError( 'Required property \'word\' not present in Word JSON') - if 'sounds_like' in _dict: - args['sounds_like'] = _dict.get('sounds_like') + if (mapping_only := _dict.get('mapping_only')) is not None: + args['mapping_only'] = mapping_only + if (sounds_like := _dict.get('sounds_like')) is not None: + args['sounds_like'] = sounds_like else: raise ValueError( 'Required property \'sounds_like\' not present in Word JSON') - if 'display_as' in _dict: - args['display_as'] = _dict.get('display_as') + if (display_as := _dict.get('display_as')) is not None: + args['display_as'] = display_as else: raise ValueError( 'Required property \'display_as\' not present in Word JSON') - if 'count' in _dict: - args['count'] = _dict.get('count') + if (count := _dict.get('count')) is not None: + args['count'] = count else: raise ValueError( 'Required property \'count\' not present in Word JSON') - if 'source' in _dict: - args['source'] = _dict.get('source') + if (source := _dict.get('source')) is not None: + args['source'] = source else: raise ValueError( 'Required property \'source\' not present in Word JSON') - if 'error' in _dict: - args['error'] = [ - WordError._from_dict(x) for x in (_dict.get('error')) - ] + if (error := _dict.get('error')) is not None: + args['error'] = [WordError.from_dict(v) for v in error] return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Word object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'word') and self.word is not None: _dict['word'] = self.word + if hasattr(self, 'mapping_only') and self.mapping_only is not None: + _dict['mapping_only'] = self.mapping_only if hasattr(self, 'sounds_like') and self.sounds_like is not None: _dict['sounds_like'] = self.sounds_like if hasattr(self, 'display_as') and self.display_as is not None: @@ -5306,63 +9810,82 @@ def _to_dict(self): if hasattr(self, 'source') and self.source is not None: _dict['source'] = self.source if hasattr(self, 'error') and self.error is not None: - _dict['error'] = [x._to_dict() for x in self.error] + error_list = [] + for v in self.error: + if isinstance(v, dict): + error_list.append(v) + else: + error_list.append(v.to_dict()) + _dict['error'] = error_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this Word object.""" - return json.dumps(self._to_dict(), 
indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Word') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Word') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class WordAlternativeResult(object): +class WordAlternativeResult: """ - WordAlternativeResult. + An alternative hypothesis for a word from speech recognition results. - :attr float confidence: A confidence score for the word alternative hypothesis in the - range of 0.0 to 1.0. - :attr str word: An alternative hypothesis for a word from the input audio. + :param float confidence: A confidence score for the word alternative hypothesis + in the range of 0.0 to 1.0. + :param str word: An alternative hypothesis for a word from the input audio. """ - def __init__(self, confidence, word): + def __init__( + self, + confidence: float, + word: str, + ) -> None: """ Initialize a WordAlternativeResult object. - :param float confidence: A confidence score for the word alternative hypothesis in - the range of 0.0 to 1.0. + :param float confidence: A confidence score for the word alternative + hypothesis in the range of 0.0 to 1.0. :param str word: An alternative hypothesis for a word from the input audio. """ self.confidence = confidence self.word = word @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'WordAlternativeResult': """Initialize a WordAlternativeResult object from a json dictionary.""" args = {} - if 'confidence' in _dict: - args['confidence'] = _dict.get('confidence') + if (confidence := _dict.get('confidence')) is not None: + args['confidence'] = confidence else: raise ValueError( 'Required property \'confidence\' not present in WordAlternativeResult JSON' ) - if 'word' in _dict: - args['word'] = _dict.get('word') + if (word := _dict.get('word')) is not None: + args['word'] = word else: raise ValueError( 'Required property \'word\' not present in WordAlternativeResult JSON' ) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a WordAlternativeResult object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'confidence') and self.confidence is not None: @@ -5371,68 +9894,76 @@ def _to_dict(self): _dict['word'] = self.word return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this WordAlternativeResult object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'WordAlternativeResult') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'WordAlternativeResult') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class WordAlternativeResults(object): +class WordAlternativeResults: """ - WordAlternativeResults. 
- - :attr float start_time: The start time in seconds of the word from the input audio - that corresponds to the word alternatives. - :attr float end_time: The end time in seconds of the word from the input audio that - corresponds to the word alternatives. - :attr list[WordAlternativeResult] alternatives: An array of alternative hypotheses for - a word from the input audio. + Information about alternative hypotheses for words from speech recognition results. + + :param float start_time: The start time in seconds of the word from the input + audio that corresponds to the word alternatives. + :param float end_time: The end time in seconds of the word from the input audio + that corresponds to the word alternatives. + :param List[WordAlternativeResult] alternatives: An array of alternative + hypotheses for a word from the input audio. """ - def __init__(self, start_time, end_time, alternatives): + def __init__( + self, + start_time: float, + end_time: float, + alternatives: List['WordAlternativeResult'], + ) -> None: """ Initialize a WordAlternativeResults object. - :param float start_time: The start time in seconds of the word from the input - audio that corresponds to the word alternatives. - :param float end_time: The end time in seconds of the word from the input audio - that corresponds to the word alternatives. - :param list[WordAlternativeResult] alternatives: An array of alternative - hypotheses for a word from the input audio. + :param float start_time: The start time in seconds of the word from the + input audio that corresponds to the word alternatives. + :param float end_time: The end time in seconds of the word from the input + audio that corresponds to the word alternatives. + :param List[WordAlternativeResult] alternatives: An array of alternative + hypotheses for a word from the input audio. 
""" self.start_time = start_time self.end_time = end_time self.alternatives = alternatives @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'WordAlternativeResults': """Initialize a WordAlternativeResults object from a json dictionary.""" args = {} - if 'start_time' in _dict: - args['start_time'] = _dict.get('start_time') + if (start_time := _dict.get('start_time')) is not None: + args['start_time'] = start_time else: raise ValueError( 'Required property \'start_time\' not present in WordAlternativeResults JSON' ) - if 'end_time' in _dict: - args['end_time'] = _dict.get('end_time') + if (end_time := _dict.get('end_time')) is not None: + args['end_time'] = end_time else: raise ValueError( 'Required property \'end_time\' not present in WordAlternativeResults JSON' ) - if 'alternatives' in _dict: + if (alternatives := _dict.get('alternatives')) is not None: args['alternatives'] = [ - WordAlternativeResult._from_dict(x) - for x in (_dict.get('alternatives')) + WordAlternativeResult.from_dict(v) for v in alternatives ] else: raise ValueError( @@ -5440,7 +9971,12 @@ def _from_dict(cls, _dict): ) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a WordAlternativeResults object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'start_time') and self.start_time is not None: @@ -5448,131 +9984,171 @@ def _to_dict(self): if hasattr(self, 'end_time') and self.end_time is not None: _dict['end_time'] = self.end_time if hasattr(self, 'alternatives') and self.alternatives is not None: - _dict['alternatives'] = [x._to_dict() for x in self.alternatives] + alternatives_list = [] + for v in self.alternatives: + if isinstance(v, dict): + alternatives_list.append(v) + else: + alternatives_list.append(v.to_dict()) + _dict['alternatives'] = alternatives_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this WordAlternativeResults object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'WordAlternativeResults') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'WordAlternativeResults') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class WordError(object): +class WordError: """ - WordError. - - :attr str element: A key-value pair that describes an error associated with the - definition of a word in the words resource. Each pair has the format `"element": - "message"`, where `element` is the aspect of the definition that caused the problem - and `message` describes the problem. The following example describes a problem with - one of the word's sounds-like definitions: `"{sounds_like_string}": "Numbers are not - allowed in sounds-like. You can try for example '{suggested_string}'."` You must - correct the error before you can train the model. + An error associated with a word from a custom language model. + + :param str element: A key-value pair that describes an error associated with the + definition of a word in the words resource. 
The pair has the format `"element": + "message"`, where `element` is the aspect of the definition that caused the + problem and `message` describes the problem. The following example describes a + problem with one of the word's sounds-like definitions: `"{sounds_like_string}": + "Numbers are not allowed in sounds-like. You can try for example + '{suggested_string}'."`. """ - def __init__(self, element): + def __init__( + self, + element: str, + ) -> None: """ Initialize a WordError object. - :param str element: A key-value pair that describes an error associated with the - definition of a word in the words resource. Each pair has the format `"element": - "message"`, where `element` is the aspect of the definition that caused the - problem and `message` describes the problem. The following example describes a - problem with one of the word's sounds-like definitions: `"{sounds_like_string}": - "Numbers are not allowed in sounds-like. You can try for example - '{suggested_string}'."` You must correct the error before you can train the model. + :param str element: A key-value pair that describes an error associated + with the definition of a word in the words resource. The pair has the + format `"element": "message"`, where `element` is the aspect of the + definition that caused the problem and `message` describes the problem. The + following example describes a problem with one of the word's sounds-like + definitions: `"{sounds_like_string}": "Numbers are not allowed in + sounds-like. You can try for example '{suggested_string}'."`. """ self.element = element @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'WordError': """Initialize a WordError object from a json dictionary.""" args = {} - if 'element' in _dict: - args['element'] = _dict.get('element') + if (element := _dict.get('element')) is not None: + args['element'] = element else: raise ValueError( 'Required property \'element\' not present in WordError JSON') return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a WordError object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'element') and self.element is not None: _dict['element'] = self.element return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this WordError object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'WordError') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'WordError') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Words(object): +class Words: """ - Words. + Information about the words from a custom language model. - :attr list[Word] words: An array of `Word` objects that provides information about - each word in the custom model's words resource. The array is empty if the custom model - has no words. + :param List[Word] words: An array of `Word` objects that provides information + about each word in the custom model's words resource. 
The array is empty if the + custom model has no words. """ - def __init__(self, words): + def __init__( + self, + words: List['Word'], + ) -> None: """ Initialize a Words object. - :param list[Word] words: An array of `Word` objects that provides information - about each word in the custom model's words resource. The array is empty if the - custom model has no words. + :param List[Word] words: An array of `Word` objects that provides + information about each word in the custom model's words resource. The array + is empty if the custom model has no words. """ self.words = words @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'Words': """Initialize a Words object from a json dictionary.""" args = {} - if 'words' in _dict: - args['words'] = [Word._from_dict(x) for x in (_dict.get('words'))] + if (words := _dict.get('words')) is not None: + args['words'] = [Word.from_dict(v) for v in words] else: raise ValueError( 'Required property \'words\' not present in Words JSON') return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Words object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'words') and self.words is not None: - _dict['words'] = [x._to_dict() for x in self.words] + words_list = [] + for v in self.words: + if isinstance(v, dict): + words_list.append(v) + else: + words_list.append(v.to_dict()) + _dict['words'] = words_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this Words object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Words') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Words') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other diff --git a/ibm_watson/speech_to_text_v1_adapter.py b/ibm_watson/speech_to_text_v1_adapter.py index d77aa340f..5f3b3969a 100644 --- a/ibm_watson/speech_to_text_v1_adapter.py +++ b/ibm_watson/speech_to_text_v1_adapter.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2018, 2024. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,15 +16,13 @@ from ibm_watson.websocket import RecognizeCallback, RecognizeListener, AudioSource from .speech_to_text_v1 import SpeechToTextV1 -import base64 -try: - from urllib.parse import urlencode -except ImportError: - from urllib import urlencode +from urllib.parse import urlencode BEARER = 'Bearer' + class SpeechToTextV1Adapter(SpeechToTextV1): + def recognize_using_websocket(self, audio, content_type, @@ -44,132 +42,280 @@ def recognize_using_websocket(self, timestamps=None, profanity_filter=None, smart_formatting=None, + smart_formatting_version=None, speaker_labels=None, http_proxy_host=None, http_proxy_port=None, - customization_id=None, grammar_name=None, redaction=None, + processing_metrics=None, + processing_metrics_interval=None, + audio_metrics=None, + end_of_phrase_silence_time=None, + split_transcript_at_phrase_end=None, + speech_detector_sensitivity=None, + background_audio_suppression=None, + low_latency=None, + character_insertion_bias=None, + sad_module=None, **kwargs): """ Sends audio for speech recognition using web sockets. + :param AudioSource audio: The audio to transcribe in the format specified by the - `Content-Type` header. + `Content-Type` header. :param str content_type: The type of the input: audio/basic, audio/flac, - audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, - audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or - audio/webm;codecs=vorbis. + audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, + audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or + audio/webm;codecs=vorbis. :param RecognizeCallback recognize_callback: The callback method for the websocket. - :param str model: The identifier of the model that is to be used for the - recognition request or, for the **Create a session** method, with the new session. - :param str language_customization_id: The customization ID (GUID) of a custom - language model that is to be used with the recognition request. The base model of - the specified custom language model must match the model specified with the - `model` parameter. You must make the request with service credentials created for - the instance of the service that owns the custom model. By default, no custom - language model is used. See [Custom - models](https://cloud.ibm.com/docs/services/speech-to-text?topic=speech-to-text-input#custom). - **Note:** Use this parameter instead of the deprecated `customization_id` - parameter. - :param str acoustic_customization_id: The customization ID (GUID) of a custom - acoustic model that is to be used with the recognition request or, for the - **Create a session** method, with the new session. The base model of the specified - custom acoustic model must match the model specified with the `model` parameter. - You must make the request with service credentials created for the instance of the - service that owns the custom model. By default, no custom acoustic model is used. - :param float customization_weight: If you specify the customization ID (GUID) of a - custom language model with the recognition request or, for sessions, with the - **Create a session** method, the customization weight tells the service how much - weight to give to words from the custom language model compared to those from the - base model for the current request. - Specify a value between 0.0 and 1.0. Unless a different customization weight was - specified for the custom model when it was trained, the default value is 0.3. 
A - customization weight that you specify overrides a weight that was specified when - the custom model was trained. - The default value yields the best performance in general. Assign a higher value if - your audio makes frequent use of OOV words from the custom model. Use caution when - setting the weight: a higher value can improve the accuracy of phrases from the - custom model's domain, but it can negatively affect performance on non-domain - phrases. - :param str base_model_version: The version of the specified base model that is to - be used with recognition request or, for the **Create a session** method, with the - new session. Multiple versions of a base model can exist when a model is updated - for internal improvements. The parameter is intended primarily for use with custom - models that have been upgraded for a new base model. The default value depends on - whether the parameter is used with or without a custom model. For more - information, see [Base model - version](https://cloud.ibm.com/docs/services/speech-to-text?topic=speech-to-text-input#version). - :param int inactivity_timeout: The time in seconds after which, if only silence - (no speech) is detected in submitted audio, the connection is closed with a 400 - error. Useful for stopping audio submission from a live microphone when a user - simply walks away. Use `-1` for infinity. - :param list[str] keywords: An array of keyword strings to spot in the audio. Each - keyword string can include one or more tokens. Keywords are spotted only in the - final hypothesis, not in interim results. If you specify any keywords, you must - also specify a keywords threshold. You can spot a maximum of 1000 keywords. Omit - the parameter or specify an empty array if you do not need to spot keywords. - :param float keywords_threshold: A confidence value that is the lower bound for - spotting a keyword. A word is considered to match a keyword if its confidence is - greater than or equal to the threshold. Specify a probability between 0 and 1 - inclusive. No keyword spotting is performed if you omit the parameter. If you - specify a threshold, you must also specify one or more keywords. - :param int max_alternatives: The maximum number of alternative transcripts to be - returned. By default, a single transcription is returned. - :param float word_alternatives_threshold: A confidence value that is the lower - bound for identifying a hypothesis as a possible word alternative (also known as - \"Confusion Networks\"). An alternative word is considered if its confidence is - greater than or equal to the threshold. Specify a probability between 0 and 1 - inclusive. No alternative words are computed if you omit the parameter. - :param bool word_confidence: If `true`, a confidence measure in the range of 0 to - 1 is returned for each word. By default, no word confidence measures are returned. - :param bool timestamps: If `true`, time alignment is returned for each word. By - default, no timestamps are returned. - :param bool profanity_filter: If `true` (the default), filters profanity from all - output except for keyword results by replacing inappropriate words with a series - of asterisks. Set the parameter to `false` to return results with no censoring. - Applies to US English transcription only. 
- :param bool smart_formatting: If `true`, converts dates, times, series of digits - and numbers, phone numbers, currency values, and internet addresses into more - readable, conventional representations in the final transcript of a recognition - request. For US English, also converts certain keyword strings to punctuation - symbols. By default, no smart formatting is performed. Applies to US English and - Spanish transcription only. - :param bool speaker_labels: If `true`, the response includes labels that identify - which words were spoken by which participants in a multi-person exchange. By - default, no speaker labels are returned. Setting `speaker_labels` to `true` forces - the `timestamps` parameter to be `true`, regardless of whether you specify `false` - for the parameter. - To determine whether a language model supports speaker labels, use the **Get - models** method and check that the attribute `speaker_labels` is set to `true`. - You can also refer to [Speaker - labels](https://cloud.ibm.com/docs/services/speech-to-text?topic=speech-to-text-output#speaker_labels). + :param str model: (optional) The identifier of the model that is to be used + for the recognition request. (**Note:** The model `ar-AR_BroadbandModel` is + deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) + and [Next-generation languages and + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). + :param str language_customization_id: (optional) The customization ID + (GUID) of a custom language model that is to be used with the recognition + request. The base model of the specified custom language model must match + the model specified with the `model` parameter. You must make the request + with credentials for the instance of the service that owns the custom + model. By default, no custom language model is used. See [Using a custom + language model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse). + **Note:** Use this parameter instead of the deprecated `customization_id` + parameter. + :param str acoustic_customization_id: (optional) The customization ID + (GUID) of a custom acoustic model that is to be used with the recognition + request. The base model of the specified custom acoustic model must match + the model specified with the `model` parameter. You must make the request + with credentials for the instance of the service that owns the custom + model. By default, no custom acoustic model is used. See [Using a custom + acoustic model for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acousticUse). + :param str base_model_version: (optional) The version of the specified base + model that is to be used with the recognition request. Multiple versions of + a base model can exist when a model is updated for internal improvements. + The parameter is intended primarily for use with custom models that have + been upgraded for a new base model. The default value depends on whether + the parameter is used with or without a custom model. See [Making speech + recognition requests with upgraded custom + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade-use#custom-upgrade-use-recognition). 
+ :param float customization_weight: (optional) If you specify the + customization ID (GUID) of a custom language model with the recognition + request, the customization weight tells the service how much weight to give + to words from the custom language model compared to those from the base + model for the current request. + Specify a value between 0.0 and 1.0. Unless a different customization + weight was specified for the custom model when it was trained, the default + value is 0.3. A customization weight that you specify overrides a weight + that was specified when the custom model was trained. + The default value yields the best performance in general. Assign a higher + value if your audio makes frequent use of OOV words from the custom model. + Use caution when setting the weight: a higher value can improve the + accuracy of phrases from the custom model's domain, but it can negatively + affect performance on non-domain phrases. + See [Using customization + weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). + :param int inactivity_timeout: (optional) The time in seconds after which, + if only silence (no speech) is detected in streaming audio, the connection + is closed with a 400 error. The parameter is useful for stopping audio + submission from a live microphone when a user simply walks away. Use `-1` + for infinity. See [Inactivity + timeout](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#timeouts-inactivity). + :param List[str] keywords: (optional) An array of keyword strings to spot + in the audio. Each keyword string can include one or more string tokens. + Keywords are spotted only in the final results, not in interim hypotheses. + If you specify any keywords, you must also specify a keywords threshold. + Omit the parameter or specify an empty array if you do not need to spot + keywords. + You can spot a maximum of 1000 keywords with a single request. A single + keyword can have a maximum length of 1024 characters, though the maximum + effective length for double-byte languages might be shorter. Keywords are + case-insensitive. + See [Keyword + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). + :param float keywords_threshold: (optional) A confidence value that is the + lower bound for spotting a keyword. A word is considered to match a keyword + if its confidence is greater than or equal to the threshold. Specify a + probability between 0.0 and 1.0. If you specify a threshold, you must also + specify one or more keywords. The service performs no keyword spotting if + you omit either parameter. See [Keyword + spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). + :param int max_alternatives: (optional) The maximum number of alternative + transcripts that the service is to return. By default, the service returns + a single transcript. If you specify a value of `0`, the service uses the + default value, `1`. See [Maximum + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#max-alternatives). + :param float word_alternatives_threshold: (optional) A confidence value + that is the lower bound for identifying a hypothesis as a possible word + alternative (also known as "Confusion Networks"). An alternative word is + considered if its confidence is greater than or equal to the threshold. + Specify a probability between 0.0 and 1.0. By default, the service computes + no alternative words. 
See [Word + alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#word-alternatives). + :param bool word_confidence: (optional) If `true`, the service returns a + confidence measure in the range of 0.0 to 1.0 for each word. By default, + the service returns no word confidence scores. See [Word + confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-confidence). + :param bool timestamps: (optional) If `true`, the service returns time + alignment for each word. By default, no timestamps are returned. See [Word + timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-timestamps). + :param bool profanity_filter: (optional) If `true`, the service filters + profanity from all output except for keyword results by replacing + inappropriate words with a series of asterisks. Set the parameter to + `false` to return results with no censoring. Applies to US English and + Japanese transcription only. See [Profanity + filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#profanity-filtering). + :param bool smart_formatting: (optional) If `true`, the service converts + dates, times, series of digits and numbers, phone numbers, currency values, + and internet addresses into more readable, conventional representations in + the final transcript of a recognition request. For US English, the service + also converts certain keyword strings to punctuation symbols. By default, + the service performs no smart formatting. + **Note:** Applies to US English, Japanese, and Spanish transcription only. + See [Smart + formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). + :param int smart_formatting_version: (optional) Smart formatting version is + for next-generation models and that is supported in US English, Brazilian + Portuguese, French and German languages. + :param bool speaker_labels: (optional) If `true`, the response includes + labels that identify which words were spoken by which participants in a + multi-person exchange. By default, the service returns no speaker labels. + Setting `speaker_labels` to `true` forces the `timestamps` parameter to be + `true`, regardless of whether you specify `false` for the parameter. + * For previous-generation models, can be used for US English, Australian + English, German, Japanese, Korean, and Spanish (both broadband and + narrowband models) and UK English (narrowband model) transcription only. + * For next-generation models, can be used for English (Australian, UK, and + US), German, and Spanish transcription only. + Restrictions and limitations apply to the use of speaker labels for both + types of models. See [Speaker + labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). :param str http_proxy_host: http proxy host name. :param str http_proxy_port: http proxy port. If not set, set to 80. - :param str customization_id: **Deprecated.** Use the `language_customization_id` - parameter to specify the customization ID (GUID) of a custom language model that - is to be used with the recognition request. Do not specify both parameters with a - request. - :param str grammar_name: The name of a grammar that is to be used with the - recognition request. If you specify a grammar, you must also use the - `language_customization_id` parameter to specify the name of the custom language - model for which the grammar is defined. 
The service recognizes only strings that - are recognized by the specified grammar; it does not recognize other custom words - from the model's words resource. See - [Grammars](https://cloud.ibm.com/docs/services/speech-to-text/output.html). - :param bool redaction: If `true`, the service redacts, or masks, numeric data from - final transcripts. The feature redacts any number that has three or more - consecutive digits by replacing each digit with an `X` character. It is intended - to redact sensitive numeric data, such as credit card numbers. By default, the - service performs no redaction. - When you enable redaction, the service automatically enables smart formatting, - regardless of whether you explicitly disable that feature. To ensure maximum - security, the service also disables keyword spotting (ignores the `keywords` and - `keywords_threshold` parameters) and returns only a single final transcript - (forces the `max_alternatives` parameter to be `1`). - **Note:** Applies to US English, Japanese, and Korean transcription only. - See [Numeric - redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction). + :param str grammar_name: (optional) The name of a grammar that is to be + used with the recognition request. If you specify a grammar, you must also + use the `language_customization_id` parameter to specify the name of the + custom language model for which the grammar is defined. The service + recognizes only strings that are recognized by the specified grammar; it + does not recognize other custom words from the model's words resource. See + [Using a grammar for speech + recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). + :param bool redaction: (optional) If `true`, the service redacts, or masks, + numeric data from final transcripts. The feature redacts any number that + has three or more consecutive digits by replacing each digit with an `X` + character. It is intended to redact sensitive numeric data, such as credit + card numbers. By default, the service performs no redaction. + When you enable redaction, the service automatically enables smart + formatting, regardless of whether you explicitly disable that feature. To + ensure maximum security, the service also disables keyword spotting + (ignores the `keywords` and `keywords_threshold` parameters) and returns + only a single final transcript (forces the `max_alternatives` parameter to + be `1`). + **Note:** Applies to US English, Japanese, and Korean transcription only. + See [Numeric + redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). + :param bool audio_metrics: (optional) If `true`, requests detailed + information about the signal characteristics of the input audio. The + service returns audio metrics with the final transcription results. By + default, the service returns no audio metrics. + See [Audio + metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio-metrics). + :param float end_of_phrase_silence_time: (optional) If `true`, specifies + the duration of the pause interval at which the service splits a transcript + into multiple final results. If the service detects pauses or extended + silence before it reaches the end of the audio stream, its response can + include multiple final results. Silence indicates a point at which the + speaker pauses between spoken words or phrases. + Specify a value for the pause interval in the range of 0.0 to 120.0. 
+ * A value greater than 0 specifies the interval that the service is to use + for speech recognition. + * A value of 0 indicates that the service is to use the default interval. + It is equivalent to omitting the parameter. + The default pause interval for most languages is 0.8 seconds; the default + for Chinese is 0.6 seconds. + See [End of phrase silence + time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#silence-time). + :param bool split_transcript_at_phrase_end: (optional) If `true`, directs + the service to split the transcript into multiple final results based on + semantic features of the input, for example, at the conclusion of + meaningful phrases such as sentences. The service bases its understanding + of semantic features on the base language model that you use with a + request. Custom language models and grammars can also influence how and + where the service splits a transcript. By default, the service splits + transcripts based solely on the pause interval. + See [Split transcript at phrase + end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#split-transcript). + :param float speech_detector_sensitivity: (optional) The sensitivity of + speech activity detection that the service is to perform. Use the parameter + to suppress word insertions from music, coughing, and other non-speech + events. The service biases the audio it passes for speech recognition by + evaluating the input audio against prior models of speech and non-speech + activity. + Specify a value between 0.0 and 1.0: + * 0.0 suppresses all audio (no speech is transcribed). + * 0.5 (the default) provides a reasonable compromise for the level of + sensitivity. + * 1.0 suppresses no audio (speech detection sensitivity is disabled). + The values increase on a monotonic curve. See [Speech detector + sensitivity](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-sensitivity). + :param float background_audio_suppression: (optional) The level to which + the service is to suppress background audio based on its volume to prevent + it from being transcribed as speech. Use the parameter to suppress side + conversations or background noise. + Specify a value in the range of 0.0 to 1.0: + * 0.0 (the default) provides no suppression (background audio suppression + is disabled). + * 0.5 provides a reasonable level of audio suppression for general usage. + * 1.0 suppresses all audio (no audio is transcribed). + The values increase on a monotonic curve. See [Background audio + suppression](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-suppression). + :param bool low_latency: (optional) If `true` for next-generation + `Multimedia` and `Telephony` models that support low latency, directs the + service to produce results even more quickly than it usually does. + Next-generation models produce transcription results faster than + previous-generation models. The `low_latency` parameter causes the models + to produce results even more quickly, though the results might be less + accurate when the parameter is used. + **Note:** The parameter is beta functionality. It is not available for + previous-generation `Broadband` and `Narrowband` models. It is available + only for some next-generation models. 
+ * For a list of next-generation models that support low latency, see + [Supported language + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported) + for next-generation models. + * For more information about the `low_latency` parameter, see [Low + latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). + :param float character_insertion_bias: (optional) For next-generation + `Multimedia` and `Telephony` models, an indication of whether the service + is biased to recognize shorter or longer strings of characters when + developing transcription hypotheses. By default, the service is optimized + for each individual model to balance its recognition of strings of + different lengths. The model-specific bias is equivalent to 0.0. + The value that you specify represents a change from a model's default bias. + The allowable range of values is -1.0 to 1.0. + * Negative values bias the service to favor hypotheses with shorter strings + of characters. + * Positive values bias the service to favor hypotheses with longer strings + of characters. + As the value approaches -1.0 or 1.0, the impact of the parameter becomes + more pronounced. To determine the most effective value for your scenario, + start by setting the value of the parameter to a small increment, such as + -0.1, -0.05, 0.05, or 0.1, and assess how the value impacts the + transcription results. Then experiment with different values as necessary, + adjusting the value by small increments. + The parameter is not available for previous-generation `Broadband` and + `Narrowband` models. + See [Character insertion + bias](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#insertion-bias). + :param int sad_module: (optional) Detects speech boundaries within the + audio stream with better performance, improved noise suppression, faster + responsiveness, and increased accuracy. + Specify `sad_module: 2` + See [Speech Activity Detection + (SAD)](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#sad). :param dict headers: A `dict` containing the request headers :return: A `dict` containing the `SpeechRecognitionResults` response. :rtype: dict @@ -178,7 +324,8 @@ def recognize_using_websocket(self, raise ValueError('audio must be provided') if not isinstance(audio, AudioSource): raise Exception( - 'audio is not of type AudioSource. Import the class from ibm_watson.websocket') + 'audio is not of type AudioSource. 
Import the class from ibm_watson.websocket' + ) if content_type is None: raise ValueError('content_type must be provided') if recognize_callback is None: @@ -187,33 +334,32 @@ def recognize_using_websocket(self, raise Exception( 'Callback is not a derived class of RecognizeCallback') + request = {} + headers = {} if self.default_headers is not None: headers = self.default_headers.copy() if 'headers' in kwargs: headers.update(kwargs.get('headers')) + request['headers'] = headers - if self.token_manager: - access_token = self.token_manager.get_token() - headers['Authorization'] = '{0} {1}'.format(BEARER, access_token) - else: - authstring = "{0}:{1}".format(self.username, self.password) - base64_authorization = base64.b64encode(authstring.encode('utf-8')).decode('utf-8') - headers['Authorization'] = 'Basic {0}'.format(base64_authorization) + if self.authenticator: + self.authenticator.authenticate(request) + + url = self.service_url.replace('https:', 'wss:') - url = self.url.replace('https:', 'wss:') params = { 'model': model, - 'customization_id': customization_id, 'acoustic_customization_id': acoustic_customization_id, - 'customization_weight': customization_weight, 'base_model_version': base_model_version, 'language_customization_id': language_customization_id } - params = dict([(k, v) for k, v in params.items() if v is not None]) + params = {k: v for k, v in params.items() if v is not None} url += '/v1/recognize?{0}'.format(urlencode(params)) + request['url'] = url options = { + 'customization_weight': customization_weight, 'content_type': content_type, 'inactivity_timeout': inactivity_timeout, 'interim_results': interim_results, @@ -225,17 +371,25 @@ def recognize_using_websocket(self, 'timestamps': timestamps, 'profanity_filter': profanity_filter, 'smart_formatting': smart_formatting, + 'smart_formatting_version': smart_formatting_version, 'speaker_labels': speaker_labels, 'grammar_name': grammar_name, - 'redaction': redaction + 'redaction': redaction, + 'processing_metrics': processing_metrics, + 'processing_metrics_interval': processing_metrics_interval, + 'audio_metrics': audio_metrics, + 'end_of_phrase_silence_time': end_of_phrase_silence_time, + 'split_transcript_at_phrase_end': split_transcript_at_phrase_end, + 'speech_detector_sensitivity': speech_detector_sensitivity, + 'background_audio_suppression': background_audio_suppression, + 'character_insertion_bias': character_insertion_bias, + 'low_latency': low_latency, + 'sad_module': sad_module, } - options = dict([(k, v) for k, v in options.items() if v is not None]) + options = {k: v for k, v in options.items() if v is not None} + request['options'] = options - RecognizeListener(audio, - options, - recognize_callback, - url, - headers, - http_proxy_host, - http_proxy_port, - self.verify) + RecognizeListener(audio, request.get('options'), recognize_callback, + request.get('url'), request.get('headers'), + http_proxy_host, http_proxy_port, + self.disable_ssl_verification) diff --git a/ibm_watson/text_to_speech_adapter_v1.py b/ibm_watson/text_to_speech_adapter_v1.py index 06fe03eb7..0cd22fd74 100644 --- a/ibm_watson/text_to_speech_adapter_v1.py +++ b/ibm_watson/text_to_speech_adapter_v1.py @@ -1,7 +1,6 @@ - # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2018, 2020. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,16 +15,14 @@ # limitations under the License. 
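To show how the reworked `recognize_using_websocket` call is intended to be used with the new keyword arguments, here is a hedged usage sketch. The API key, model name, audio file, and callback hooks (`on_data`, `on_error`) are assumptions based on the `ibm_watson.websocket` package and the usual SDK setup, not values taken from this diff; only the parameter names come from the signature above.

```python
# Hypothetical usage sketch for the WebSocket speech recognition adapter.
# Credentials, model, and audio file below are placeholders.
from ibm_watson import SpeechToTextV1
from ibm_watson.websocket import RecognizeCallback, AudioSource
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator


class MyRecognizeCallback(RecognizeCallback):

    def on_data(self, data):
        # Interim and final SpeechRecognitionResults arrive here as dicts.
        print(data)

    def on_error(self, error):
        print('Error received:', error)


authenticator = IAMAuthenticator('YOUR_APIKEY')  # placeholder credentials
speech_to_text = SpeechToTextV1(authenticator=authenticator)

with open('audio-file.flac', 'rb') as audio_file:  # placeholder audio
    speech_to_text.recognize_using_websocket(
        audio=AudioSource(audio_file),
        content_type='audio/flac',
        recognize_callback=MyRecognizeCallback(),
        model='en-US_Telephony',  # placeholder next-generation model
        interim_results=True,
        # keyword options added or reworked in this change:
        end_of_phrase_silence_time=0.5,
        split_transcript_at_phrase_end=True,
        speech_detector_sensitivity=0.5,
        background_audio_suppression=0.5,
        low_latency=True,
        character_insertion_bias=0.1,
    )
```

Note that `low_latency` and `character_insertion_bias` apply only to next-generation models, which is why the sketch pairs them with a `Telephony` model.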
from ibm_watson.websocket import SynthesizeCallback, SynthesizeListener -import base64 from .text_to_speech_v1 import TextToSpeechV1 -try: - from urllib.parse import urlencode -except ImportError: - from urllib import urlencode +from urllib.parse import urlencode BEARER = 'Bearer' + class TextToSpeechV1Adapter(TextToSpeechV1): + def synthesize_using_websocket(self, text, synthesize_callback, @@ -33,6 +30,9 @@ def synthesize_using_websocket(self, voice=None, timings=None, customization_id=None, + spell_out_mode=None, + rate_percentage=None, + pitch_percentage=None, http_proxy_host=None, http_proxy_port=None, **kwargs): @@ -44,25 +44,70 @@ def synthesize_using_websocket(self, :param str text: Provides the text that is to be synthesized. The client can pass plain text or text that is annotated with the Speech Synthesis Markup Language (SSML). For more - information, see [Specifying input text](https://cloud.ibm.com/docs/services/text-to-speech?topic=text-to-speech-usingHTTP#input). + information, see [Specifying input text](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-usingHTTP#input). SSML input can also include the element; - see [Specifying an SSML mark](https://cloud.ibm.com/docs/services/text-to-speech?topic=text-to-speech-timing#mark). + see [Specifying an SSML mark](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-timing#mark). The client can pass a maximum of 5 KB of text with the request. :param SynthesizeCallback synthesize_callback: The callback method for the websocket. :param str accept: Specifies the requested format (MIME type) of the audio. For more information, see [Specifying - an audio format](https://cloud.ibm.com/docs/services/text-to-speech?topic=text-to-speech-usingHTTP#format). In addition to the + an audio format](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-usingHTTP#format). In addition to the supported specifications, you can use */* to specify the default audio format, audio/ogg;codecs=opus. :param str voice: The voice to use for synthesis. :param list[str] timings: Specifies that the service is to return word timing information for all strings of the input text. The service returns the start and end time of each string of the input. Specify words as the lone element of the array to request word timings. Specify an empty array or omit the parameter to receive no word timings. For - more information, see [Obtaining word timings](https://cloud.ibm.com/docs/services/text-to-speech?topic=text-to-speech-timing#timing). + more information, see [Obtaining word timings](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-timing#timing). Not supported for Japanese input text. :param str customization_id: Specifies the globally unique identifier (GUID) for a custom voice model that is to be used for the synthesis. A custom voice model is guaranteed to work only if it matches the language of the voice that is used for the synthesis. If you include a customization ID, you must call the method with the service credentials of the custom model's owner. Omit the parameter to use the specified voice with no customization. For more information, see [Understanding customization] - (https://cloud.ibm.com/docs/services/text-to-speech?topic=text-to-speech-customIntro#customIntro). + (https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customIntro#customIntro). + :param str spell_out_mode: (optional) *For German voices,* indicates how + the service is to spell out strings of individual letters. 
To indicate the + pace of the spelling, specify one of the following values: + * `default` - The service reads the characters at the rate at which it + synthesizes speech for the request. You can also omit the parameter + entirely to achieve the default behavior. + * `singles` - The service reads the characters one at a time, with a brief + pause between each character. + * `pairs` - The service reads the characters two at a time, with a brief + pause between each pair. + * `triples` - The service reads the characters three at a time, with a + brief pause between each triplet. + The parameter is available only for IBM Cloud. + **See also:** [Specifying how strings are spelled + out](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-synthesis-params#params-spell-out-mode). + :param int rate_percentage: (optional) The percentage change from the + default speaking rate of the voice that is used for speech synthesis. Each + voice has a default speaking rate that is optimized to represent a normal + rate of speech. The parameter accepts an integer that represents the + percentage change from the voice's default rate: + * Specify a signed negative integer to reduce the speaking rate by that + percentage. For example, -10 reduces the rate by ten percent. + * Specify an unsigned or signed positive integer to increase the speaking + rate by that percentage. For example, 10 and +10 increase the rate by ten + percent. + * Specify 0 or omit the parameter to get the default speaking rate for the + voice. + The parameter affects the rate for an entire request. + For more information, see [Modifying the speaking + rate](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-synthesis-params#params-rate-percentage). + :param int pitch_percentage: (optional) The percentage change from the + default speaking pitch of the voice that is used for speech synthesis. Each + voice has a default speaking pitch that is optimized to represent a normal + tone of voice. The parameter accepts an integer that represents the + percentage change from the voice's default tone: + * Specify a signed negative integer to lower the voice's pitch by that + percentage. For example, -5 reduces the tone by five percent. + * Specify an unsigned or signed positive integer to increase the voice's + pitch by that percentage. For example, 5 and +5 increase the tone by five + percent. + * Specify 0 or omit the parameter to get the default speaking pitch for the + voice. + The parameter affects the pitch for an entire request. + For more information, see [Modifying the speaking + pitch](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-synthesis-params#params-pitch-percentage). :param str http_proxy_host: http proxy host name. :param str http_proxy_port: http proxy port. If not set, set to 80. 
:param dict headers: A `dict` containing the request headers @@ -77,39 +122,35 @@ def synthesize_using_websocket(self, raise Exception( 'Callback is not a derived class of SynthesizeCallback') + request = {} + headers = {} if self.default_headers is not None: headers = self.default_headers.copy() if 'headers' in kwargs: headers.update(kwargs.get('headers')) + request['headers'] = headers - if self.token_manager: - access_token = self.token_manager.get_token() - headers['Authorization'] = '{0} {1}'.format(BEARER, access_token) - else: - authstring = "{0}:{1}".format(self.username, self.password) - base64_authorization = base64.b64encode(authstring.encode('utf-8')).decode('utf-8') - headers['Authorization'] = 'Basic {0}'.format(base64_authorization) + if self.authenticator: + self.authenticator.authenticate(request) - url = self.url.replace('https:', 'wss:') + url = self.service_url.replace('https:', 'wss:') params = { 'voice': voice, 'customization_id': customization_id, + 'spell_out_mode': spell_out_mode, + 'rate_percentage': rate_percentage, + 'pitch_percentage': pitch_percentage } - params = dict([(k, v) for k, v in params.items() if v is not None]) + params = {k: v for k, v in params.items() if v is not None} url += '/v1/synthesize?{0}'.format(urlencode(params)) + request['url'] = url - options = { - 'text': text, - 'accept': accept, - 'timings': timings - } - options = dict([(k, v) for k, v in options.items() if v is not None]) + options = {'text': text, 'accept': accept, 'timings': timings} + options = {k: v for k, v in options.items() if v is not None} + request['options'] = options - SynthesizeListener(options, - synthesize_callback, - url, - headers, - http_proxy_host, - http_proxy_port, - self.verify) + SynthesizeListener(request.get('options'), synthesize_callback, + request.get('url'), request.get('headers'), + http_proxy_host, http_proxy_port, + self.disable_ssl_verification) diff --git a/ibm_watson/text_to_speech_v1.py b/ibm_watson/text_to_speech_v1.py index fcfddebab..7748b75b1 100644 --- a/ibm_watson/text_to_speech_v1.py +++ b/ibm_watson/text_to_speech_v1.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2015, 2025. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,30 +13,42 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +# IBM OpenAPI SDK Code Generator Version: 3.105.0-3c13b041-20250605-193116 """ -### Service Overview -The IBM® Text to Speech service provides APIs that use IBM's speech-synthesis +The IBM Watson™ Text to Speech service provides APIs that use IBM's speech-synthesis capabilities to synthesize text into natural-sounding speech in a variety of languages, dialects, and voices. The service supports at least one male or female voice, sometimes both, for each language. The audio is streamed back to the client with minimal delay. For speech synthesis, the service supports a synchronous HTTP Representational State -Transfer (REST) interface. It also supports a WebSocket interface that provides both plain -text and SSML input, including the SSML <mark> element and word timings. SSML is an -XML-based markup language that provides text annotation for speech-synthesis applications. -The service also offers a customization interface. 
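# --- Usage sketch (not part of the diff): synthesize_using_websocket ---
# A minimal, hypothetical example of the WebSocket synthesis call with the new
# rate_percentage and pitch_percentage parameters described above (spell_out_mode
# applies to German voices only). Credentials, URL, voice, and the output file
# name are placeholders.
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import TextToSpeechV1
from ibm_watson.websocket import SynthesizeCallback


class SaveAudio(SynthesizeCallback):
    """Appends streamed audio chunks to a local file."""

    def __init__(self, path):
        super().__init__()
        self.path = path

    def on_audio_stream(self, audio_stream):
        with open(self.path, 'ab') as out:
            out.write(audio_stream)

    def on_error(self, error):
        print('Synthesis error:', error)


text_to_speech = TextToSpeechV1(authenticator=IAMAuthenticator('{apikey}'))
text_to_speech.set_service_url('{service_url}')

text_to_speech.synthesize_using_websocket(
    'Hello from the Watson Text to Speech service.',
    SaveAudio('hello.ogg'),
    accept='audio/ogg;codecs=opus',
    voice='en-US_MichaelV3Voice',
    rate_percentage=-10,   # speak 10 percent slower than the voice's default
    pitch_percentage=5,    # raise the pitch by 5 percent
)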
You can use the interface to define -sounds-like or phonetic translations for words. A sounds-like translation consists of one -or more words that, when combined, sound like the word. A phonetic translation is based on -the SSML phoneme format for representing a word. You can specify a phonetic translation in -standard International Phonetic Alphabet (IPA) representation or in the proprietary IBM -Symbolic Phonetic Representation (SPR). +Transfer (REST) interface and a WebSocket interface. Both interfaces support plain text +and SSML input. SSML is an XML-based markup language that provides text annotation for +speech-synthesis applications. The WebSocket interface also supports the SSML +<mark> element and word timings. +The service offers a customization interface that you can use to define sounds-like or +phonetic translations for words. A sounds-like translation consists of one or more words +that, when combined, sound like the word. A phonetic translation is based on the SSML +phoneme format for representing a word. You can specify a phonetic translation in standard +International Phonetic Alphabet (IPA) representation or in the proprietary IBM Symbolic +Phonetic Representation (SPR). +The service also offers a Tune by Example feature that lets you define custom prompts. You +can also define speaker models to improve the quality of your custom prompts. The service +supports custom prompts only for US English custom models and voices. + +API Version: 1.0.0 +See: https://cloud.ibm.com/docs/text-to-speech """ -from __future__ import absolute_import - +from enum import Enum +from typing import BinaryIO, Dict, List, Optional import json + +from ibm_cloud_sdk_core import BaseService, DetailedResponse +from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator +from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment +from ibm_cloud_sdk_core.utils import convert_model + from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService -from os.path import basename ############################################################################## # Service @@ -46,143 +58,153 @@ class TextToSpeechV1(BaseService): """The Text to Speech V1 service.""" - default_url = 'https://stream.watsonplatform.net/text-to-speech/api' + DEFAULT_SERVICE_URL = 'https://api.us-south.text-to-speech.watson.cloud.ibm.com' + DEFAULT_SERVICE_NAME = 'text_to_speech' def __init__( - self, - url=default_url, - username=None, - password=None, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): + self, + authenticator: Authenticator = None, + service_name: str = DEFAULT_SERVICE_NAME, + ) -> None: """ Construct a new client for the Text to Speech service. - :param str url: The base url to use when contacting the service (e.g. - "https://stream.watsonplatform.net/text-to-speech/api/text-to-speech/api"). - The base url may differ between IBM Cloud regions. - - :param str username: The username used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str password: The password used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. 
When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. - - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. Defaults to - 'https://iam.cloud.ibm.com/identity/token'. + :param Authenticator authenticator: The authenticator specifies the authentication mechanism. + Get up to date information from https://github.com/IBM/python-sdk-core/blob/main/README.md + about initializing the authenticator of your choice. """ - - BaseService.__init__( - self, - vcap_services_name='text_to_speech', - url=url, - username=username, - password=password, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Text to Speech') + if not authenticator: + authenticator = get_authenticator_from_environment(service_name) + BaseService.__init__(self, + service_url=self.DEFAULT_SERVICE_URL, + authenticator=authenticator) + self.configure_service(service_name) ######################### # Voices ######################### - def get_voice(self, voice, customization_id=None, **kwargs): + def list_voices( + self, + **kwargs, + ) -> DetailedResponse: """ - Get a voice. + List voices. - Gets information about the specified voice. The information includes the name, - language, gender, and other details about the voice. Specify a customization ID to - obtain information for that custom voice model of the specified voice. To list - information about all available voices, use the **List voices** method. - **See also:** [Listing a specific - voice](https://cloud.ibm.com/docs/services/text-to-speech/voices.html#listVoice). + Lists all voices available for use with the service. The information includes the + name, language, gender, and other details about the voice. The ordering of the + list of voices can change from call to call; do not rely on an alphabetized or + static list of voices. To see information about a specific voice, use the [Get a + voice](#getvoice). + **See also:** [Listing all + voices](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices-list#list-all-voices). - :param str voice: The voice for which information is to be returned. - :param str customization_id: The customization ID (GUID) of a custom voice model - for which information is to be returned. You must make the request with service - credentials created for the instance of the service that owns the custom model. - Omit the parameter to see information about the specified voice with no - customization. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
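# --- Usage sketch (not part of the diff): authenticator-based construction ---
# Under the new constructor, username/password and iam_apikey keywords are
# replaced by an Authenticator object from ibm_cloud_sdk_core. A hypothetical
# IAM-based setup followed by list_voices() and get_voice() calls might look
# like this; the API key and region URL are placeholders.
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import TextToSpeechV1

authenticator = IAMAuthenticator('{apikey}')
text_to_speech = TextToSpeechV1(authenticator=authenticator)
text_to_speech.set_service_url(
    'https://api.us-south.text-to-speech.watson.cloud.ibm.com')

# List every available voice, then look at one voice in detail.
voices = text_to_speech.list_voices().get_result()
for voice in voices['voices']:
    print(voice['name'], voice['language'])

allison = text_to_speech.get_voice('en-US_AllisonV3Voice').get_result()
print(allison['description'])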
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Voices` object """ - if voice is None: - raise ValueError('voice must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', 'get_voice') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_voices', + ) headers.update(sdk_headers) - params = {'customization_id': customization_id} + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' - url = '/v1/voices/{0}'.format(*self._encode_path_vars(voice)) - response = self.request( + url = '/v1/voices' + request = self.prepare_request( method='GET', url=url, headers=headers, - params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def list_voices(self, **kwargs): + def get_voice( + self, + voice: str, + *, + customization_id: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ - List voices. + Get a voice. - Lists all voices available for use with the service. The information includes the - name, language, gender, and other details about the voice. To see information - about a specific voice, use the **Get a voice** method. - **See also:** [Listing all available - voices](https://cloud.ibm.com/docs/services/text-to-speech/voices.html#listVoices). + Gets information about the specified voice. The information includes the name, + language, gender, and other details about the voice. Specify a customization ID to + obtain information for a custom model that is defined for the language of the + specified voice. To list information about all available voices, use the [List + voices](#listvoices) method. + **See also:** [Listing a specific + voice](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices-list#list-specific-voice). + :param str voice: The voice for which information is to be returned. + :param str customization_id: (optional) The customization ID (GUID) of a + custom model for which information is to be returned. You must make the + request with credentials for the instance of the service that owns the + custom model. Omit the parameter to see information about the specified + voice with no customization. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Voice` object """ + if not voice: + raise ValueError('voice must be provided') headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_voice', + ) + headers.update(sdk_headers) + + params = { + 'customization_id': customization_id, + } + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', 'list_voices') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['voice'] + path_param_values = self.encode_path_vars(voice) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/voices/{voice}'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + params=params, + ) - url = '/v1/voices' - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response ######################### # Synthesis ######################### - def synthesize(self, - text, - voice=None, - customization_id=None, - accept=None, - **kwargs): + def synthesize( + self, + text: str, + *, + accept: Optional[str] = None, + voice: Optional[str] = None, + customization_id: Optional[str] = None, + spell_out_mode: Optional[str] = None, + rate_percentage: Optional[int] = None, + pitch_percentage: Optional[int] = None, + **kwargs, + ) -> DetailedResponse: """ Synthesize audio. @@ -193,12 +215,13 @@ def synthesize(self, 8 KB for the URL and headers. The 5 KB limit includes any SSML tags that you specify. The service returns the synthesized audio stream as an array of bytes. **See also:** [The HTTP - interface](https://cloud.ibm.com/docs/services/text-to-speech/http.html). + interface](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-usingHTTP#usingHTTP). ### Audio formats (accept types) The service can return audio in the following formats (MIME types). * Where indicated, you can optionally specify the sampling rate (`rate`) of the - audio. You must specify a sampling rate for the `audio/l16` and `audio/mulaw` - formats. A specified sampling rate must lie in the range of 8 kHz to 192 kHz. + audio. You must specify a sampling rate for the `audio/alaw`, `audio/l16`, and + `audio/mulaw` formats. A specified sampling rate must lie in the range of 8 kHz to + 192 kHz. Some formats restrict the sampling rate to certain values, as noted. * For the `audio/l16` format, you can optionally specify the endianness (`endianness`) of the audio: `endianness=big-endian` or `endianness=little-endian`. @@ -206,477 +229,580 @@ def synthesize(self, of the response audio. If you omit an audio format altogether, the service returns the audio in Ogg format with the Opus codec (`audio/ogg;codecs=opus`). The service always returns single-channel audio. - * `audio/basic` - The service returns audio with a sampling rate of 8000 Hz. - * `audio/flac` - You can optionally specify the `rate` of the audio. The default sampling rate is - 22,050 Hz. - * `audio/l16` - You must specify the `rate` of the audio. You can optionally specify the - `endianness` of the audio. The default endianness is `little-endian`. - * `audio/mp3` - You can optionally specify the `rate` of the audio. The default sampling rate is - 22,050 Hz. - * `audio/mpeg` - You can optionally specify the `rate` of the audio. 
The default sampling rate is - 22,050 Hz. - * `audio/mulaw` - You must specify the `rate` of the audio. - * `audio/ogg` - The service returns the audio in the `vorbis` codec. You can optionally specify - the `rate` of the audio. The default sampling rate is 22,050 Hz. - * `audio/ogg;codecs=opus` - You can optionally specify the `rate` of the audio. The default sampling rate is - 22,050 Hz. - * `audio/ogg;codecs=vorbis` - You can optionally specify the `rate` of the audio. The default sampling rate is - 22,050 Hz. - * `audio/wav` - You can optionally specify the `rate` of the audio. The default sampling rate is - 22,050 Hz. - * `audio/webm` - The service returns the audio in the `opus` codec. The service returns audio - with a sampling rate of 48,000 Hz. - * `audio/webm;codecs=opus` - The service returns audio with a sampling rate of 48,000 Hz. - * `audio/webm;codecs=vorbis` - You can optionally specify the `rate` of the audio. The default sampling rate is - 22,050 Hz. + * `audio/alaw` - You must specify the `rate` of the audio. + * `audio/basic` - The service returns audio with a sampling rate of 8000 Hz. + * `audio/flac` - You can optionally specify the `rate` of the audio. The default + sampling rate is 24,000 Hz for Natural voices and 22,050 Hz for all other voices. + * `audio/l16` - You must specify the `rate` of the audio. You can optionally + specify the `endianness` of the audio. The default endianness is `little-endian`. + * `audio/mp3` - You can optionally specify the `rate` of the audio. The default + sampling rate is 24,000 Hz for Natural voices and 22,050 Hz for for all other + voices. + * `audio/mpeg` - You can optionally specify the `rate` of the audio. The default + sampling rate is 24,000 Hz for Natural voices and 22,050 Hz for all other voices. + * `audio/mulaw` - You must specify the `rate` of the audio. + * `audio/ogg` - The service returns the audio in the `vorbis` codec. You can + optionally specify the `rate` of the audio. The default sampling rate is 48,000 + Hz. + * `audio/ogg;codecs=opus` - You can optionally specify the `rate` of the audio. + Only the following values are valid sampling rates: `48000`, `24000`, `16000`, + `12000`, or `8000`. If you specify a value other than one of these, the service + returns an error. The default sampling rate is 48,000 Hz. + * `audio/ogg;codecs=vorbis` - You can optionally specify the `rate` of the audio. + The default sampling rate is 48,000 Hz. + * `audio/wav` - You can optionally specify the `rate` of the audio. The default + sampling rate is 24,000 Hz for Natural voices and 22,050 Hz for all other voices. + * `audio/webm` - The service returns the audio in the `opus` codec. The service + returns audio with a sampling rate of 48,000 Hz. + * `audio/webm;codecs=opus` - The service returns audio with a sampling rate of + 48,000 Hz. + * `audio/webm;codecs=vorbis` - You can optionally specify the `rate` of the audio. + The default sampling rate is 48,000 Hz. For more information about specifying an audio format, including additional - details about some of the formats, see [Audio - formats](https://cloud.ibm.com/docs/services/text-to-speech/audio-formats.html). + details about some of the formats, see [Using audio + formats](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-audio-formats). + **Note:** By default, the service returns audio in the Ogg audio format with the + Opus codec (`audio/ogg;codecs=opus`). However, the Ogg audio format is not + supported with the Safari browser. 
If you are using the service with the Safari + browser, you must use the `Accept` request header or the `accept` query parameter + specify a different format in which you want the service to return the audio. ### Warning messages If a request includes invalid query parameters, the service returns a `Warnings` response header that provides messages about the invalid parameters. The warning includes a descriptive message and a list of invalid argument strings. For - example, a message such as `\"Unknown arguments:\"` or `\"Unknown url query - arguments:\"` followed by a list of the form `\"{invalid_arg_1}, - {invalid_arg_2}.\"` The request succeeds despite the warnings. + example, a message such as `"Unknown arguments:"` or `"Unknown url query + arguments:"` followed by a list of the form `"{invalid_arg_1}, {invalid_arg_2}."` + The request succeeds despite the warnings. :param str text: The text to synthesize. - :param str voice: The voice to use for synthesis. - :param str customization_id: The customization ID (GUID) of a custom voice model - to use for the synthesis. If a custom voice model is specified, it is guaranteed - to work only if it matches the language of the indicated voice. You must make the - request with service credentials created for the instance of the service that owns - the custom model. Omit the parameter to use the specified voice with no - customization. - :param str accept: The requested format (MIME type) of the audio. You can use the - `Accept` header or the `accept` parameter to specify the audio format. For more - information about specifying an audio format, see **Audio formats (accept types)** - in the method description. - Default: `audio/ogg;codecs=opus`. + :param str accept: (optional) The requested format (MIME type) of the + audio. You can use the `Accept` header or the `accept` parameter to specify + the audio format. For more information about specifying an audio format, + see **Audio formats (accept types)** in the method description. + :param str voice: (optional) The voice to use for speech synthesis. If you + omit the `voice` parameter, the service uses the US English + `en-US_MichaelV3Voice` by default. + _For IBM Cloud Pak for Data,_ if you do not install the + `en-US_MichaelV3Voice`, you must either specify a voice with the request or + specify a new default voice for your installation of the service. + **See also:** + * [Languages and + voices](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices) + * [Using the default + voice](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices-use#specify-voice-default). + :param str customization_id: (optional) The customization ID (GUID) of a + custom model to use for the synthesis. If a custom model is specified, it + works only if it matches the language of the indicated voice. You must make + the request with credentials for the instance of the service that owns the + custom model. Omit the parameter to use the specified voice with no + customization. + :param str spell_out_mode: (optional) *For German voices,* indicates how + the service is to spell out strings of individual letters. To indicate the + pace of the spelling, specify one of the following values: + * `default` - The service reads the characters at the rate at which it + synthesizes speech for the request. You can also omit the parameter + entirely to achieve the default behavior. + * `singles` - The service reads the characters one at a time, with a brief + pause between each character. 
+ * `pairs` - The service reads the characters two at a time, with a brief + pause between each pair. + * `triples` - The service reads the characters three at a time, with a + brief pause between each triplet. + For more information, see [Specifying how strings are spelled + out](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-synthesis-params#params-spell-out-mode). + :param int rate_percentage: (optional) The percentage change from the + default speaking rate of the voice that is used for speech synthesis. Each + voice has a default speaking rate that is optimized to represent a normal + rate of speech. The parameter accepts an integer that represents the + percentage change from the voice's default rate: + * Specify a signed negative integer to reduce the speaking rate by that + percentage. For example, -10 reduces the rate by ten percent. + * Specify an unsigned or signed positive integer to increase the speaking + rate by that percentage. For example, 10 and +10 increase the rate by ten + percent. + * Specify 0 or omit the parameter to get the default speaking rate for the + voice. + The parameter affects the rate for an entire request. + For more information, see [Modifying the speaking + rate](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-synthesis-params#params-rate-percentage). + :param int pitch_percentage: (optional) The percentage change from the + default speaking pitch of the voice that is used for speech synthesis. Each + voice has a default speaking pitch that is optimized to represent a normal + tone of voice. The parameter accepts an integer that represents the + percentage change from the voice's default tone: + * Specify a signed negative integer to lower the voice's pitch by that + percentage. For example, -5 reduces the tone by five percent. + * Specify an unsigned or signed positive integer to increase the voice's + pitch by that percentage. For example, 5 and +5 increase the tone by five + percent. + * Specify 0 or omit the parameter to get the default speaking pitch for the + voice. + The parameter affects the pitch for an entire request. + For more information, see [Modifying the speaking + pitch](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-synthesis-params#params-pitch-percentage). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
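# --- Usage sketch (not part of the diff): synchronous synthesize() ---
# A minimal, hypothetical example of the HTTP synthesize() call using one of
# the audio formats listed above together with the new rate and pitch
# parameters. Credentials, URL, voice, and the file name are placeholders.
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import TextToSpeechV1

text_to_speech = TextToSpeechV1(authenticator=IAMAuthenticator('{apikey}'))
text_to_speech.set_service_url('{service_url}')

response = text_to_speech.synthesize(
    'Hello world',
    accept='audio/mp3',            # MP3 at the voice's default sampling rate
    voice='en-US_MichaelV3Voice',
    rate_percentage=10,            # 10 percent faster than the default rate
    pitch_percentage=-5,           # 5 percent lower than the default pitch
).get_result()

# For binary formats the result wraps the raw HTTP response; write its bytes.
with open('hello_world.mp3', 'wb') as audio_file:
    audio_file.write(response.content)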
- :rtype: DetailedResponse + :rtype: DetailedResponse with `BinaryIO` result """ if text is None: raise ValueError('text must be provided') - - headers = {'Accept': accept} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', 'synthesize') + headers = { + 'Accept': accept, + } + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='synthesize', + ) headers.update(sdk_headers) - params = {'voice': voice, 'customization_id': customization_id} + params = { + 'voice': voice, + 'customization_id': customization_id, + 'spell_out_mode': spell_out_mode, + 'rate_percentage': rate_percentage, + 'pitch_percentage': pitch_percentage, + } + + data = { + 'text': text, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - data = {'text': text} + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] url = '/v1/synthesize' - response = self.request( + request = self.prepare_request( method='POST', url=url, headers=headers, params=params, - json=data, - accept_json=False) + data=data, + ) + + response = self.send(request, **kwargs) return response ######################### # Pronunciation ######################### - def get_pronunciation(self, - text, - voice=None, - format=None, - customization_id=None, - **kwargs): + def get_pronunciation( + self, + text: str, + *, + voice: Optional[str] = None, + format: Optional[str] = None, + customization_id: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ Get pronunciation. Gets the phonetic pronunciation for the specified word. You can request the pronunciation for a specific format. You can also request the pronunciation for a specific voice to see the default translation for the language of that voice or - for a specific custom voice model to see the translation for that voice model. - **Note:** This method is currently a beta release. + for a specific custom model to see the translation for that model. **See also:** [Querying a word from a - language](https://cloud.ibm.com/docs/services/text-to-speech/custom-entries.html#cuWordsQueryLanguage). + language](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuWordsQueryLanguage). :param str text: The word for which the pronunciation is requested. - :param str voice: A voice that specifies the language in which the pronunciation - is to be returned. All voices for the same language (for example, `en-US`) return - the same translation. - :param str format: The phoneme format in which to return the pronunciation. Omit - the parameter to obtain the pronunciation in the default format. - :param str customization_id: The customization ID (GUID) of a custom voice model - for which the pronunciation is to be returned. The language of a specified custom - model must match the language of the specified voice. If the word is not defined - in the specified custom model, the service returns the default translation for the - custom model's language. You must make the request with service credentials - created for the instance of the service that owns the custom model. Omit the - parameter to see the translation for the specified voice with no customization. + :param str voice: (optional) A voice that specifies the language in which + the pronunciation is to be returned. 
If you omit the `voice` parameter, the + service uses the US English `en-US_MichaelV3Voice` by default. All voices + for the same language (for example, `en-US`) return the same translation. + _For IBM Cloud Pak for Data,_ if you do not install the + `en-US_MichaelV3Voice`, you must either specify a voice with the request or + specify a new default voice for your installation of the service. + **See also:** [Using the default + voice](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices-use#specify-voice-default). + :param str format: (optional) The phoneme format in which to return the + pronunciation. Omit the parameter to obtain the pronunciation in the + default format. + :param str customization_id: (optional) The customization ID (GUID) of a + custom model for which the pronunciation is to be returned. The language of + a specified custom model must match the language of the specified voice. If + the word is not defined in the specified custom model, the service returns + the default translation for the custom model's language. You must make the + request with credentials for the instance of the service that owns the + custom model. Omit the parameter to see the translation for the specified + voice with no customization. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Pronunciation` object """ - if text is None: + if not text: raise ValueError('text must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', - 'get_pronunciation') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_pronunciation', + ) headers.update(sdk_headers) params = { 'text': text, 'voice': voice, 'format': format, - 'customization_id': customization_id + 'customization_id': customization_id, } + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + url = '/v1/pronunciation' - response = self.request( + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response ######################### # Custom models ######################### - def create_voice_model(self, - name, - language=None, - description=None, - **kwargs): + def create_custom_model( + self, + name: str, + *, + language: Optional[str] = None, + description: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ Create a custom model. - Creates a new empty custom voice model. You must specify a name for the new custom + Creates a new empty custom model. You must specify a name for the new custom model. You can optionally specify the language and a description for the new model. The model is owned by the instance of the service whose credentials are used to create it. - **Note:** This method is currently a beta release. **See also:** [Creating a custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-models.html#cuModelsCreate). - - :param str name: The name of the new custom voice model. - :param str language: The language of the new custom voice model. Omit the - parameter to use the the default language, `en-US`. - :param str description: A description of the new custom voice model. 
Specifying a - description is recommended. + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsCreate). + + :param str name: The name of the new custom model. Use a localized name + that matches the language of the custom model. Use a name that describes + the purpose of the custom model, such as `Medical custom model` or `Legal + custom model`. Use a name that is unique among all custom models that you + own. + Include a maximum of 256 characters in the name. Do not use backslashes, + slashes, colons, equal signs, ampersands, or question marks in the name. + :param str language: (optional) The language of the new custom model. You + create a custom model for a specific language, not for a specific voice. A + custom model can be used with any voice for its specified language. Omit + the parameter to use the the default language, `en-US`. + :param str description: (optional) A recommended description of the new + custom model. Use a localized description that matches the language of the + custom model. Include a maximum of 128 characters in the description. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `CustomModel` object """ if name is None: raise ValueError('name must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', - 'create_voice_model') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_custom_model', + ) headers.update(sdk_headers) - data = {'name': name, 'language': language, 'description': description} + data = { + 'name': name, + 'language': language, + 'description': description, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' url = '/v1/customizations' - response = self.request( + request = self.prepare_request( method='POST', url=url, headers=headers, - json=data, - accept_json=True) - return response - - def delete_voice_model(self, customization_id, **kwargs): - """ - Delete a custom model. - - Deletes the specified custom voice model. You must use credentials for the - instance of the service that owns a model to delete it. - **Note:** This method is currently a beta release. - **See also:** [Deleting a custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-models.html#cuModelsDelete). - - :param str customization_id: The customization ID (GUID) of the custom voice - model. You must make the request with service credentials created for the instance - of the service that owns the custom model. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
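# --- Usage sketch (not part of the diff): pronunciations and custom models ---
# Hypothetical calls that look up a pronunciation and then create and list
# custom models, using the renamed create_custom_model/list_custom_models
# methods. Credentials and the service URL are placeholders.
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import TextToSpeechV1

text_to_speech = TextToSpeechV1(authenticator=IAMAuthenticator('{apikey}'))
text_to_speech.set_service_url('{service_url}')

# Default IPA pronunciation of a word for a US English voice.
pronunciation = text_to_speech.get_pronunciation(
    'IEEE', voice='en-US_MichaelV3Voice', format='ipa').get_result()
print(pronunciation['pronunciation'])

# Create an empty custom model, then list all models owned by these credentials.
model = text_to_speech.create_custom_model(
    name='Medical custom model',
    language='en-US',
    description='Pronunciations for clinical terms').get_result()
customization_id = model['customization_id']

for custom in text_to_speech.list_custom_models().get_result()['customizations']:
    print(custom['customization_id'], custom['name'])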
- :rtype: DetailedResponse - """ - - if customization_id is None: - raise ValueError('customization_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', - 'delete_voice_model') - headers.update(sdk_headers) + data=data, + ) - url = '/v1/customizations/{0}'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='DELETE', url=url, headers=headers, accept_json=False) + response = self.send(request, **kwargs) return response - def get_voice_model(self, customization_id, **kwargs): + def list_custom_models( + self, + *, + language: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: """ - Get a custom model. + List custom models. - Gets all information about a specified custom voice model. In addition to metadata - such as the name and description of the voice model, the output includes the words - and their translations as defined in the model. To see just the metadata for a - voice model, use the **List custom models** method. - **Note:** This method is currently a beta release. - **See also:** [Querying a custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-models.html#cuModelsQuery). + Lists metadata such as the name and description for all custom models that are + owned by an instance of the service. Specify a language to list the custom models + for that language only. To see the words and prompts in addition to the metadata + for a specific custom model, use the [Get a custom model](#getcustommodel) method. + You must use credentials for the instance of the service that owns a model to list + information about it. + **See also:** [Querying all custom + models](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsQueryAll). - :param str customization_id: The customization ID (GUID) of the custom voice - model. You must make the request with service credentials created for the instance - of the service that owns the custom model. + :param str language: (optional) The language for which custom models that + are owned by the requesting credentials are to be returned. Omit the + parameter to see all custom models that are owned by the requester. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `CustomModels` object """ - if customization_id is None: - raise ValueError('customization_id must be provided') - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', 'get_voice_model') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_custom_models', + ) headers.update(sdk_headers) - url = '/v1/customizations/{0}'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) - return response - - def list_voice_models(self, language=None, **kwargs): - """ - List custom models. - - Lists metadata such as the name and description for all custom voice models that - are owned by an instance of the service. Specify a language to list the voice - models for that language only. To see the words in addition to the metadata for a - specific voice model, use the **List a custom model** method. 
You must use - credentials for the instance of the service that owns a model to list information - about it. - **Note:** This method is currently a beta release. - **See also:** [Querying all custom - models](https://cloud.ibm.com/docs/services/text-to-speech/custom-models.html#cuModelsQueryAll). - - :param str language: The language for which custom voice models that are owned by - the requesting service credentials are to be returned. Omit the parameter to see - all custom voice models that are owned by the requester. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ + params = { + 'language': language, + } - headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', - 'list_voice_models') - headers.update(sdk_headers) - - params = {'language': language} + del kwargs['headers'] + headers['Accept'] = 'application/json' url = '/v1/customizations' - response = self.request( + request = self.prepare_request( method='GET', url=url, headers=headers, params=params, - accept_json=True) + ) + + response = self.send(request, **kwargs) return response - def update_voice_model(self, - customization_id, - name=None, - description=None, - words=None, - **kwargs): + def update_custom_model( + self, + customization_id: str, + *, + name: Optional[str] = None, + description: Optional[str] = None, + words: Optional[List['Word']] = None, + **kwargs, + ) -> DetailedResponse: """ Update a custom model. - Updates information for the specified custom voice model. You can update metadata - such as the name and description of the voice model. You can also update the words - in the model and their translations. Adding a new translation for a word that - already exists in a custom model overwrites the word's existing translation. A - custom model can contain no more than 20,000 entries. You must use credentials for - the instance of the service that owns a model to update it. + Updates information for the specified custom model. You can update metadata such + as the name and description of the model. You can also update the words in the + model and their translations. Adding a new translation for a word that already + exists in a custom model overwrites the word's existing translation. A custom + model can contain no more than 20,000 entries. You must use credentials for the + instance of the service that owns a model to update it. You can define sounds-like or phonetic translations for words. A sounds-like translation consists of one or more words that, when combined, sound like the word. Phonetic translations are based on the SSML phoneme format for representing a word. You can specify them in standard International Phonetic Alphabet (IPA) representation - <phoneme alphabet=\"ipa\" - ph=\"təmˈɑto\"></phoneme> + <phoneme alphabet="ipa" + ph="təmˈɑto"></phoneme> or in the proprietary IBM Symbolic Phonetic Representation (SPR) - <phoneme alphabet=\"ibm\" - ph=\"1gAstroEntxrYFXs\"></phoneme> - **Note:** This method is currently a beta release. 
+ <phoneme alphabet="ibm" + ph="1gAstroEntxrYFXs"></phoneme> **See also:** * [Updating a custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-models.html#cuModelsUpdate) + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsUpdate) * [Adding words to a Japanese custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-entries.html#cuJapaneseAdd) + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuJapaneseAdd) * [Understanding - customization](https://cloud.ibm.com/docs/services/text-to-speech/custom-intro.html). - - :param str customization_id: The customization ID (GUID) of the custom voice - model. You must make the request with service credentials created for the instance - of the service that owns the custom model. - :param str name: A new name for the custom voice model. - :param str description: A new description for the custom voice model. - :param list[Word] words: An array of `Word` objects that provides the words and - their translations that are to be added or updated for the custom voice model. - Pass an empty array to make no additions or updates. + customization](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customIntro#customIntro). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param str name: (optional) A new name for the custom model. + :param str description: (optional) A new description for the custom model. + :param List[Word] words: (optional) An array of `Word` objects that + provides the words and their translations that are to be added or updated + for the custom model. Pass an empty array to make no additions or updates. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') if words is not None: - words = [self._convert_model(x, Word) for x in words] - + words = [convert_model(x) for x in words] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', - 'update_voice_model') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='update_custom_model', + ) headers.update(sdk_headers) - data = {'name': name, 'description': description, 'words': words} + data = { + 'name': name, + 'description': description, + 'words': words, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/customizations/{0}'.format( - *self._encode_path_vars(customization_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}'.format(**path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, - json=data, - accept_json=True) - return response + data=data, + ) - ######################### - # Custom words - ######################### + response = self.send(request, **kwargs) + return response - def add_word(self, - customization_id, - word, - translation, - part_of_speech=None, - **kwargs): + def get_custom_model( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: """ - Add a custom word. + Get a custom model. - Adds a single word and its translation to the specified custom voice model. Adding - a new translation for a word that already exists in a custom model overwrites the - word's existing translation. A custom model can contain no more than 20,000 - entries. You must use credentials for the instance of the service that owns a - model to add a word to it. - You can define sounds-like or phonetic translations for words. A sounds-like - translation consists of one or more words that, when combined, sound like the - word. Phonetic translations are based on the SSML phoneme format for representing - a word. You can specify them in standard International Phonetic Alphabet (IPA) - representation - <phoneme alphabet=\"ipa\" - ph=\"təmˈɑto\"></phoneme> - or in the proprietary IBM Symbolic Phonetic Representation (SPR) - <phoneme alphabet=\"ibm\" - ph=\"1gAstroEntxrYFXs\"></phoneme> - **Note:** This method is currently a beta release. - **See also:** - * [Adding a single word to a custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-entries.html#cuWordAdd) - * [Adding words to a Japanese custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-entries.html#cuJapaneseAdd) - * [Understanding - customization](https://cloud.ibm.com/docs/services/text-to-speech/custom-intro.html). - - :param str customization_id: The customization ID (GUID) of the custom voice - model. You must make the request with service credentials created for the instance - of the service that owns the custom model. - :param str word: The word that is to be added or updated for the custom voice - model. 
- :param str translation: The phonetic or sounds-like translation for the word. A - phonetic translation is based on the SSML format for representing the phonetic - string of a word either as an IPA translation or as an IBM SPR translation. A - sounds-like is one or more words that, when combined, sound like the word. - :param str part_of_speech: **Japanese only.** The part of speech for the word. The - service uses the value to produce the correct intonation for the word. You can - create only a single entry, with or without a single part of speech, for any word; - you cannot create multiple entries with different parts of speech for the same - word. For more information, see [Working with Japanese - entries](https://cloud.ibm.com/docs/services/text-to-speech/custom-rules.html#jaNotes). + Gets all information about a specified custom model. In addition to metadata such + as the name and description of the custom model, the output includes the words and + their translations that are defined for the model, as well as any prompts that are + defined for the model. To see just the metadata for a model, use the [List custom + models](#listcustommodels) method. + **See also:** [Querying a custom + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsQuery). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `CustomModel` object """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if word is None: - raise ValueError('word must be provided') - if translation is None: - raise ValueError('translation must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_custom_model', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', 'add_word') + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) + return response + + def delete_custom_model( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete a custom model. + + Deletes the specified custom model. You must use credentials for the instance of + the service that owns a model to delete it. + **See also:** [Deleting a custom + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsDelete). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse + """ + + if not customization_id: + raise ValueError('customization_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_custom_model', + ) headers.update(sdk_headers) - data = {'translation': translation, 'part_of_speech': part_of_speech} + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] - url = '/v1/customizations/{0}/words/{1}'.format( - *self._encode_path_vars(customization_id, word)) - response = self.request( - method='PUT', + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}'.format(**path_param_dict) + request = self.prepare_request( + method='DELETE', url=url, headers=headers, - json=data, - accept_json=False) + ) + + response = self.send(request, **kwargs) return response - def add_words(self, customization_id, words, **kwargs): + ######################### + # Custom words + ######################### + + def add_words( + self, + customization_id: str, + words: List['Word'], + **kwargs, + ) -> DetailedResponse: """ Add custom words. - Adds one or more words and their translations to the specified custom voice model. + Adds one or more words and their translations to the specified custom model. Adding a new translation for a word that already exists in a custom model overwrites the word's existing translation. A custom model can contain no more than 20,000 entries. You must use credentials for the instance of the service that @@ -686,314 +812,2380 @@ def add_words(self, customization_id, words, **kwargs): word. Phonetic translations are based on the SSML phoneme format for representing a word. You can specify them in standard International Phonetic Alphabet (IPA) representation - <phoneme alphabet=\"ipa\" - ph=\"təmˈɑto\"></phoneme> + <phoneme alphabet="ipa" + ph="təmˈɑto"></phoneme> or in the proprietary IBM Symbolic Phonetic Representation (SPR) - <phoneme alphabet=\"ibm\" - ph=\"1gAstroEntxrYFXs\"></phoneme> - **Note:** This method is currently a beta release. + <phoneme alphabet="ibm" + ph="1gAstroEntxrYFXs"></phoneme> **See also:** * [Adding multiple words to a custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-entries.html#cuWordsAdd) + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuWordsAdd) * [Adding words to a Japanese custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-entries.html#cuJapaneseAdd) + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuJapaneseAdd) * [Understanding - customization](https://cloud.ibm.com/docs/services/text-to-speech/custom-intro.html). - - :param str customization_id: The customization ID (GUID) of the custom voice - model. You must make the request with service credentials created for the instance - of the service that owns the custom model. - :param list[Word] words: The **Add custom words** method accepts an array of - `Word` objects. Each object provides a word that is to be added or updated for the - custom voice model and the word's translation. - The **List custom words** method returns an array of `Word` objects. Each object - shows a word and its translation from the custom voice model. The words are listed - in alphabetical order, with uppercase letters listed before lowercase letters. 
The - array is empty if the custom model contains no words. + customization](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customIntro#customIntro). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param List[Word] words: The [Add custom words](#addwords) method accepts + an array of `Word` objects. Each object provides a word that is to be added + or updated for the custom model and the word's translation. + The [List custom words](#listwords) method returns an array of `Word` + objects. Each object shows a word and its translation from the custom + model. The words are listed in alphabetical order, with uppercase letters + listed before lowercase letters. The array is empty if the custom model + contains no words. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') if words is None: raise ValueError('words must be provided') - words = [self._convert_model(x, Word) for x in words] - + words = [convert_model(x) for x in words] headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', 'add_words') + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='add_words', + ) headers.update(sdk_headers) - data = {'words': words} + data = { + 'words': words, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' - url = '/v1/customizations/{0}/words'.format( - *self._encode_path_vars(customization_id)) - response = self.request( + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/words'.format( + **path_param_dict) + request = self.prepare_request( method='POST', url=url, headers=headers, - json=data, - accept_json=True) + data=data, + ) + + response = self.send(request, **kwargs) return response - def delete_word(self, customization_id, word, **kwargs): + def list_words( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: """ - Delete a custom word. + List custom words. - Deletes a single word from the specified custom voice model. You must use - credentials for the instance of the service that owns a model to delete its words. - **Note:** This method is currently a beta release. - **See also:** [Deleting a word from a custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-entries.html#cuWordDelete). + Lists all of the words and their translations for the specified custom model. The + output shows the translations as they are defined in the model. You must use + credentials for the instance of the service that owns a model to list its words. + **See also:** [Querying all words from a custom + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuWordsQueryModel). 
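As a usage sketch for the add_words() method completed above, assuming `text_to_speech` is an authenticated TextToSpeechV1 instance (constructed as in the earlier sketch) and assuming the Word model's keyword constructor, several translations can be batched into a single request:

from ibm_watson.text_to_speech_v1 import Word

# Hypothetical sounds-like translations; IPA/SPR phoneme strings in the SSML
# format described above would work the same way.
words = [
    Word(word='IEEE', translation='I triple E'),
    Word(word='gnocchi', translation='nyohkey'),
]
response = text_to_speech.add_words(
    customization_id='{customization_id}',
    words=words,
)
print(response.get_status_code())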
+ + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Words` object + """ + + if not customization_id: + raise ValueError('customization_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_words', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/words'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) - :param str customization_id: The customization ID (GUID) of the custom voice - model. You must make the request with service credentials created for the instance - of the service that owns the custom model. - :param str word: The word that is to be deleted from the custom voice model. + response = self.send(request, **kwargs) + return response + + def add_word( + self, + customization_id: str, + word: str, + translation: str, + *, + part_of_speech: Optional[str] = None, + **kwargs, + ) -> DetailedResponse: + """ + Add a custom word. + + Adds a single word and its translation to the specified custom model. Adding a new + translation for a word that already exists in a custom model overwrites the word's + existing translation. A custom model can contain no more than 20,000 entries. You + must use credentials for the instance of the service that owns a model to add a + word to it. + You can define sounds-like or phonetic translations for words. A sounds-like + translation consists of one or more words that, when combined, sound like the + word. Phonetic translations are based on the SSML phoneme format for representing + a word. You can specify them in standard International Phonetic Alphabet (IPA) + representation + <phoneme alphabet="ipa" + ph="təmˈɑto"></phoneme> + or in the proprietary IBM Symbolic Phonetic Representation (SPR) + <phoneme alphabet="ibm" + ph="1gAstroEntxrYFXs"></phoneme> + **See also:** + * [Adding a single word to a custom + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuWordAdd) + * [Adding words to a Japanese custom + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuJapaneseAdd) + * [Understanding + customization](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customIntro#customIntro). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param str word: The word that is to be added or updated for the custom + model. + :param str translation: The phonetic or sounds-like translation for the + word. A phonetic translation is based on the SSML format for representing + the phonetic string of a word either as an IPA translation or as an IBM SPR + translation. 
A sounds-like is one or more words that, when combined, sound + like the word. + :param str part_of_speech: (optional) **Japanese only.** The part of speech + for the word. The service uses the value to produce the correct intonation + for the word. You can create only a single entry, with or without a single + part of speech, for any word; you cannot create multiple entries with + different parts of speech for the same word. For more information, see + [Working with Japanese + entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if word is None: + if not word: raise ValueError('word must be provided') - + if translation is None: + raise ValueError('translation must be provided') headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='add_word', + ) + headers.update(sdk_headers) + + data = { + 'translation': translation, + 'part_of_speech': part_of_speech, + } + data = {k: v for (k, v) in data.items() if v is not None} + data = json.dumps(data) + headers['content-type'] = 'application/json' + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', 'delete_word') - headers.update(sdk_headers) + del kwargs['headers'] + + path_param_keys = ['customization_id', 'word'] + path_param_values = self.encode_path_vars(customization_id, word) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/words/{word}'.format( + **path_param_dict) + request = self.prepare_request( + method='PUT', + url=url, + headers=headers, + data=data, + ) - url = '/v1/customizations/{0}/words/{1}'.format( - *self._encode_path_vars(customization_id, word)) - response = self.request( - method='DELETE', url=url, headers=headers, accept_json=False) + response = self.send(request, **kwargs) return response - def get_word(self, customization_id, word, **kwargs): + def get_word( + self, + customization_id: str, + word: str, + **kwargs, + ) -> DetailedResponse: """ Get a custom word. Gets the translation for a single word from the specified custom model. The output shows the translation as it is defined in the model. You must use credentials for the instance of the service that owns a model to list its words. - **Note:** This method is currently a beta release. **See also:** [Querying a single word from a custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-entries.html#cuWordQueryModel). + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuWordQueryModel). - :param str customization_id: The customization ID (GUID) of the custom voice - model. You must make the request with service credentials created for the instance - of the service that owns the custom model. - :param str word: The word that is to be queried from the custom voice model. + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param str word: The word that is to be queried from the custom model. 
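A corresponding sketch for the single-word add_word() call implemented above, again assuming an authenticated `text_to_speech` client and a placeholder customization ID; part_of_speech is omitted because it applies only to Japanese models.

response = text_to_speech.add_word(
    customization_id='{customization_id}',
    word='IEEE',
    translation='I triple E',
)
# add_word issues a PUT and returns no JSON body; only the status code matters here.
print(response.get_status_code())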
:param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :rtype: DetailedResponse with `dict` result representing a `Translation` object """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - if word is None: + if not word: raise ValueError('word must be provided') - headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_word', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', 'get_word') - headers.update(sdk_headers) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'word'] + path_param_values = self.encode_path_vars(customization_id, word) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/words/{word}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) - url = '/v1/customizations/{0}/words/{1}'.format( - *self._encode_path_vars(customization_id, word)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response - def list_words(self, customization_id, **kwargs): + def delete_word( + self, + customization_id: str, + word: str, + **kwargs, + ) -> DetailedResponse: """ - List custom words. + Delete a custom word. - Lists all of the words and their translations for the specified custom voice - model. The output shows the translations as they are defined in the model. You - must use credentials for the instance of the service that owns a model to list its - words. - **Note:** This method is currently a beta release. - **See also:** [Querying all words from a custom - model](https://cloud.ibm.com/docs/services/text-to-speech/custom-entries.html#cuWordsQueryModel). + Deletes a single word from the specified custom model. You must use credentials + for the instance of the service that owns a model to delete its words. + **See also:** [Deleting a word from a custom + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuWordDelete). - :param str customization_id: The customization ID (GUID) of the custom voice - model. You must make the request with service credentials created for the instance - of the service that owns the custom model. + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param str word: The word that is to be deleted from the custom model. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ - if customization_id is None: + if not customization_id: raise ValueError('customization_id must be provided') - + if not word: + raise ValueError('word must be provided') headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_word', + ) + headers.update(sdk_headers) + if 'headers' in kwargs: headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', 'list_words') - headers.update(sdk_headers) + del kwargs['headers'] + + path_param_keys = ['customization_id', 'word'] + path_param_values = self.encode_path_vars(customization_id, word) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/words/{word}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + ) - url = '/v1/customizations/{0}/words'.format( - *self._encode_path_vars(customization_id)) - response = self.request( - method='GET', url=url, headers=headers, accept_json=True) + response = self.send(request, **kwargs) return response ######################### - # User data + # Custom prompts ######################### - def delete_user_data(self, customer_id, **kwargs): + def list_custom_prompts( + self, + customization_id: str, + **kwargs, + ) -> DetailedResponse: """ - Delete labeled data. + List custom prompts. + + Lists information about all custom prompts that are defined for a custom model. + The information includes the prompt ID, prompt text, status, and optional speaker + ID for each prompt of the custom model. You must use credentials for the instance + of the service that owns the custom model. The same information about all of the + prompts for a custom model is also provided by the [Get a custom + model](#getcustommodel) method. That method provides complete details about a + specified custom model, including its language, owner, custom words, and more. + Custom prompts are supported only for use with US English custom models and + voices. + **See also:** [Listing custom + prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-list). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `Prompts` object + """ + + if not customization_id: + raise ValueError('customization_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_custom_prompts', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id'] + path_param_values = self.encode_path_vars(customization_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/prompts'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) + return response + + def add_custom_prompt( + self, + customization_id: str, + prompt_id: str, + metadata: 'PromptMetadata', + file: BinaryIO, + **kwargs, + ) -> DetailedResponse: + """ + Add a custom prompt. + + Adds a custom prompt to a custom model. A prompt is defined by the text that is to + be spoken, the audio for that text, a unique user-specified ID for the prompt, and + an optional speaker ID. The information is used to generate prosodic data that is + not visible to the user. This data is used by the service to produce the + synthesized audio upon request. You must use credentials for the instance of the + service that owns a custom model to add a prompt to it. You can add a maximum of + 1000 custom prompts to a single custom model. + You are recommended to assign meaningful values for prompt IDs. For example, use + `goodbye` to identify a prompt that speaks a farewell message. Prompt IDs must be + unique within a given custom model. You cannot define two prompts with the same + name for the same custom model. If you provide the ID of an existing prompt, the + previously uploaded prompt is replaced by the new information. The existing prompt + is reprocessed by using the new text and audio and, if provided, new speaker + model, and the prosody data associated with the prompt is updated. + The quality of a prompt is undefined if the language of a prompt does not match + the language of its custom model. This is consistent with any text or SSML that is + specified for a speech synthesis request. The service makes a best-effort attempt + to render the specified text for the prompt; it does not validate that the + language of the text matches the language of the model. + Adding a prompt is an asynchronous operation. Although it accepts less audio than + speaker enrollment, the service must align the audio with the provided text. The + time that it takes to process a prompt depends on the prompt itself. The + processing time for a reasonably sized prompt generally matches the length of the + audio (for example, it takes 20 seconds to process a 20-second prompt). + For shorter prompts, you can wait for a reasonable amount of time and then check + the status of the prompt with the [Get a custom prompt](#getcustomprompt) method. + For longer prompts, consider using that method to poll the service every few + seconds to determine when the prompt becomes available. No prompt can be used for + speech synthesis if it is in the `processing` or `failed` state. Only prompts that + are in the `available` state can be used for speech synthesis. 
+ When it processes a request, the service attempts to align the text and the audio + that are provided for the prompt. The text that is passed with a prompt must match + the spoken audio as closely as possible. Optimally, the text and audio match + exactly. The service does its best to align the specified text with the audio, and + it can often compensate for mismatches between the two. But if the service cannot + effectively align the text and the audio, possibly because the magnitude of + mismatches between the two is too great, processing of the prompt fails. + ### Evaluating a prompt + Always listen to and evaluate a prompt to determine its quality before using it + in production. To evaluate a prompt, include only the single prompt in a speech + synthesis request by using the following SSML extension, in this case for a prompt + whose ID is `goodbye`: + `` + In some cases, you might need to rerecord and resubmit a prompt as many as five + times to address the following possible problems: + * The service might fail to detect a mismatch between the prompt’s text and audio. + The longer the prompt, the greater the chance for misalignment between its text + and audio. Therefore, multiple shorter prompts are preferable to a single long + prompt. + * The text of a prompt might include a word that the service does not recognize. + In this case, you can create a custom word and pronunciation pair to tell the + service how to pronounce the word. You must then re-create the prompt. + * The quality of the input audio might be insufficient or the service’s processing + of the audio might fail to detect the intended prosody. Submitting new audio for + the prompt can correct these issues. + If a prompt that is created without a speaker ID does not adequately reflect the + intended prosody, enrolling the speaker and providing a speaker ID for the prompt + is one recommended means of potentially improving the quality of the prompt. This + is especially important for shorter prompts such as "good-bye" or "thank you," + where less audio data makes it more difficult to match the prosody of the speaker. + Custom prompts are supported only for use with US English custom models and + voices. + **See also:** + * [Add a custom + prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-add-prompt) + * [Evaluate a custom + prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-evaluate-prompt) + * [Rules for creating custom + prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-rules#tbe-rules-prompts). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param str prompt_id: The identifier of the prompt that is to be added to + the custom model: + * Include a maximum of 49 characters in the ID. + * Include only alphanumeric characters and `_` (underscores) in the ID. + * Do not include XML sensitive characters (double quotes, single quotes, + ampersands, angle brackets, and slashes) in the ID. + * To add a new prompt, the ID must be unique for the specified custom + model. Otherwise, the new information for the prompt overwrites the + existing prompt that has that ID. + :param PromptMetadata metadata: Information about the prompt that is to be + added to a custom model. 
The following example of a `PromptMetadata` object + includes both the required prompt text and an optional speaker model ID: + `{ "prompt_text": "Thank you and good-bye!", "speaker_id": + "823068b2-ed4e-11ea-b6e0-7b6456aa95cc" }`. + :param BinaryIO file: An audio file that speaks the text of the prompt with + intonation and prosody that matches how you would like the prompt to be + spoken. + * The prompt audio must be in WAV format and must have a minimum sampling + rate of 16 kHz. The service accepts audio with higher sampling rates. The + service transcodes all audio to 16 kHz before processing it. + * The length of the prompt audio is limited to 30 seconds. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Prompt` object + """ + + if not customization_id: + raise ValueError('customization_id must be provided') + if not prompt_id: + raise ValueError('prompt_id must be provided') + if metadata is None: + raise ValueError('metadata must be provided') + if file is None: + raise ValueError('file must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='add_custom_prompt', + ) + headers.update(sdk_headers) + + form_data = [] + form_data.append( + ('metadata', (None, json.dumps(metadata), 'application/json'))) + form_data.append(('file', (None, file, 'audio/wav'))) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'prompt_id'] + path_param_values = self.encode_path_vars(customization_id, prompt_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/prompts/{prompt_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + files=form_data, + ) + + response = self.send(request, **kwargs) + return response + + def get_custom_prompt( + self, + customization_id: str, + prompt_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get a custom prompt. + + Gets information about a specified custom prompt for a specified custom model. The + information includes the prompt ID, prompt text, status, and optional speaker ID + for each prompt of the custom model. You must use credentials for the instance of + the service that owns the custom model. Custom prompts are supported only for use + with US English custom models and voices. + **See also:** [Listing custom + prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-list). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param str prompt_id: The identifier (name) of the prompt. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
+ :rtype: DetailedResponse with `dict` result representing a `Prompt` object + """ + + if not customization_id: + raise ValueError('customization_id must be provided') + if not prompt_id: + raise ValueError('prompt_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_custom_prompt', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['customization_id', 'prompt_id'] + path_param_values = self.encode_path_vars(customization_id, prompt_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/prompts/{prompt_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) + return response + + def delete_custom_prompt( + self, + customization_id: str, + prompt_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete a custom prompt. + + Deletes an existing custom prompt from a custom model. The service deletes the + prompt with the specified ID. You must use credentials for the instance of the + service that owns the custom model from which the prompt is to be deleted. + **Caution:** Deleting a custom prompt elicits a 400 response code from synthesis + requests that attempt to use the prompt. Make sure that you do not attempt to use + a deleted prompt in a production application. Custom prompts are supported only + for use with US English custom models and voices. + **See also:** [Deleting a custom + prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-delete). + + :param str customization_id: The customization ID (GUID) of the custom + model. You must make the request with credentials for the instance of the + service that owns the custom model. + :param str prompt_id: The identifier (name) of the prompt that is to be + deleted. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if not customization_id: + raise ValueError('customization_id must be provided') + if not prompt_id: + raise ValueError('prompt_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_custom_prompt', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['customization_id', 'prompt_id'] + path_param_values = self.encode_path_vars(customization_id, prompt_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/customizations/{customization_id}/prompts/{prompt_id}'.format( + **path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # Speaker models + ######################### + + def list_speaker_models( + self, + **kwargs, + ) -> DetailedResponse: + """ + List speaker models. + + Lists information about all speaker models that are defined for a service + instance. The information includes the speaker ID and speaker name of each defined + speaker. 
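The docstrings above recommend adding a prompt and then polling its status until it leaves the `processing` state. A sketch of that flow, assuming an authenticated `text_to_speech` client, a US English custom model, and a local prompt.wav file, and passing the prompt metadata as a plain dict that mirrors the PromptMetadata example in the docstring:

import time

with open('prompt.wav', 'rb') as audio_file:
    added = text_to_speech.add_custom_prompt(
        customization_id='{customization_id}',
        prompt_id='goodbye',
        metadata={'prompt_text': 'Thank you and good-bye!'},
        file=audio_file,
    ).get_result()

# Poll every few seconds until the prompt is no longer being processed.
status = added['status']
while status == 'processing':
    time.sleep(5)
    status = text_to_speech.get_custom_prompt(
        customization_id='{customization_id}',
        prompt_id='goodbye',
    ).get_result()['status']
print(status)  # 'available' on success, 'failed' otherwise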
You must use credentials for the instance of a service to list its + speakers. Speaker models and the custom prompts with which they are used are + supported only for use with US English custom models and voices. + **See also:** [Listing speaker + models](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-list). + + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `Speakers` object + """ + + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='list_speaker_models', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/speakers' + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) + return response + + def create_speaker_model( + self, + speaker_name: str, + audio: BinaryIO, + **kwargs, + ) -> DetailedResponse: + """ + Create a speaker model. + + Creates a new speaker model, which is an optional enrollment token for users who + are to add prompts to custom models. A speaker model contains information about a + user's voice. The service extracts this information from a WAV audio sample that + you pass as the body of the request. Associating a speaker model with a prompt is + optional, but the information that is extracted from the speaker model helps the + service learn about the speaker's voice. + A speaker model can make an appreciable difference in the quality of prompts, + especially short prompts with relatively little audio, that are associated with + that speaker. A speaker model can help the service produce a prompt with more + confidence; the lack of a speaker model can potentially compromise the quality of + a prompt. + The gender of the speaker who creates a speaker model does not need to match the + gender of a voice that is used with prompts that are associated with that speaker + model. For example, a speaker model that is created by a male speaker can be + associated with prompts that are spoken by female voices. + You create a speaker model for a given instance of the service. The new speaker + model is owned by the service instance whose credentials are used to create it. + That same speaker can then be used to create prompts for all custom models within + that service instance. No language is associated with a speaker model, but each + custom model has a single specified language. You can add prompts only to US + English models. + You specify a name for the speaker when you create it. The name must be unique + among all speaker names for the owning service instance. To re-create a speaker + model for an existing speaker name, you must first delete the existing speaker + model that has that name. + Speaker enrollment is a synchronous operation. Although it accepts more audio data + than a prompt, the process of adding a speaker is very fast. The service simply + extracts information about the speaker’s voice from the audio. Unlike prompts, + speaker models neither need nor accept a transcription of the audio. When the call + returns, the audio is fully processed and the speaker enrollment is complete. + The service returns a speaker ID with the request. 
A speaker ID is globally unique + identifier (GUID) that you use to identify the speaker in subsequent requests to + the service. Speaker models and the custom prompts with which they are used are + supported only for use with US English custom models and voices. + **See also:** + * [Create a speaker + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-speaker-model) + * [Rules for creating speaker + models](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-rules#tbe-rules-speakers). + + :param str speaker_name: The name of the speaker that is to be added to the + service instance. + * Include a maximum of 49 characters in the name. + * Include only alphanumeric characters and `_` (underscores) in the name. + * Do not include XML sensitive characters (double quotes, single quotes, + ampersands, angle brackets, and slashes) in the name. + * Do not use the name of an existing speaker that is already defined for + the service instance. + :param BinaryIO audio: An enrollment audio file that contains a sample of + the speaker’s voice. + * The enrollment audio must be in WAV format and must have a minimum + sampling rate of 16 kHz. The service accepts audio with higher sampling + rates. It transcodes all audio to 16 kHz before processing it. + * The length of the enrollment audio is limited to 1 minute. Speaking one + or two paragraphs of text that include five to ten sentences is + recommended. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `SpeakerModel` object + """ + + if not speaker_name: + raise ValueError('speaker_name must be provided') + if audio is None: + raise ValueError('audio must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='create_speaker_model', + ) + headers.update(sdk_headers) + + params = { + 'speaker_name': speaker_name, + } + + data = audio + headers['content-type'] = 'audio/wav' + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + url = '/v1/speakers' + request = self.prepare_request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + ) + + response = self.send(request, **kwargs) + return response + + def get_speaker_model( + self, + speaker_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Get a speaker model. + + Gets information about all prompts that are defined by a specified speaker for all + custom models that are owned by a service instance. The information is grouped by + the customization IDs of the custom models. For each custom model, the information + lists information about each prompt that is defined for that custom model by the + speaker. You must use credentials for the instance of the service that owns a + speaker model to list its prompts. Speaker models and the custom prompts with + which they are used are supported only for use with US English custom models and + voices. + **See also:** [Listing the custom prompts for a speaker + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-list-prompts). + + :param str speaker_id: The speaker ID (GUID) of the speaker model. 
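A sketch for the synchronous create_speaker_model() call shown above, assuming an authenticated `text_to_speech` client and a local WAV enrollment sample of up to one minute; the file name and speaker name are placeholders.

# Speaker enrollment is synchronous; the returned speaker_id can later be
# referenced as the optional speaker_id field of a prompt's metadata.
with open('speaker_enrollment.wav', 'rb') as audio_file:
    speaker = text_to_speech.create_speaker_model(
        speaker_name='elizabeth',
        audio=audio_file,
    ).get_result()
print(speaker['speaker_id'])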
You must + make the request with service credentials for the instance of the service + that owns the speaker model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse with `dict` result representing a `SpeakerCustomModels` object + """ + + if not speaker_id: + raise ValueError('speaker_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='get_speaker_model', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + headers['Accept'] = 'application/json' + + path_param_keys = ['speaker_id'] + path_param_values = self.encode_path_vars(speaker_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/speakers/{speaker_id}'.format(**path_param_dict) + request = self.prepare_request( + method='GET', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) + return response + + def delete_speaker_model( + self, + speaker_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete a speaker model. + + Deletes an existing speaker model from the service instance. The service deletes + the enrolled speaker with the specified speaker ID. You must use credentials for + the instance of the service that owns a speaker model to delete the speaker. + Any prompts that are associated with the deleted speaker are not affected by the + speaker's deletion. The prosodic data that defines the quality of a prompt is + established when the prompt is created. A prompt is static and remains unaffected + by deletion of its associated speaker. However, the prompt cannot be resubmitted + or updated with its original speaker once that speaker is deleted. Speaker models + and the custom prompts with which they are used are supported only for use with US + English custom models and voices. + **See also:** [Deleting a speaker + model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-delete). + + :param str speaker_id: The speaker ID (GUID) of the speaker model. You must + make the request with service credentials for the instance of the service + that owns the speaker model. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if not speaker_id: + raise ValueError('speaker_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_speaker_model', + ) + headers.update(sdk_headers) + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + path_param_keys = ['speaker_id'] + path_param_values = self.encode_path_vars(speaker_id) + path_param_dict = dict(zip(path_param_keys, path_param_values)) + url = '/v1/speakers/{speaker_id}'.format(**path_param_dict) + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + ) + + response = self.send(request, **kwargs) + return response + + ######################### + # User data + ######################### + + def delete_user_data( + self, + customer_id: str, + **kwargs, + ) -> DetailedResponse: + """ + Delete labeled data. + + Deletes all data that is associated with a specified customer ID. 
The method + deletes all data for the customer ID, regardless of the method by which the + information was added. The method has no effect if no data is associated with the + customer ID. You must issue the request with credentials for the same instance of + the service that was used to associate the customer ID with the data. You + associate a customer ID with data by passing the `X-Watson-Metadata` header with a + request that passes the data. + **Note:** If you delete an instance of the service from the service console, all + data associated with that service instance is automatically deleted. This includes + all custom models and word/translation pairs, and all data related to speech + synthesis requests. + **See also:** [Information + security](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-information-security#information-security). + + :param str customer_id: The customer ID for which all data is to be + deleted. + :param dict headers: A `dict` containing the request headers + :return: A `DetailedResponse` containing the result, headers and HTTP status code. + :rtype: DetailedResponse + """ + + if not customer_id: + raise ValueError('customer_id must be provided') + headers = {} + sdk_headers = get_sdk_headers( + service_name=self.DEFAULT_SERVICE_NAME, + service_version='V1', + operation_id='delete_user_data', + ) + headers.update(sdk_headers) + + params = { + 'customer_id': customer_id, + } + + if 'headers' in kwargs: + headers.update(kwargs.get('headers')) + del kwargs['headers'] + + url = '/v1/user_data' + request = self.prepare_request( + method='DELETE', + url=url, + headers=headers, + params=params, + ) + + response = self.send(request, **kwargs) + return response + + +class GetVoiceEnums: + """ + Enums for get_voice parameters. + """ + + class Voice(str, Enum): + """ + The voice for which information is to be returned. 
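A sketch for the delete_user_data() request assembled above, assuming an authenticated `text_to_speech` client; the customer ID is whatever value was previously passed in the `X-Watson-Metadata` header, shown here as a placeholder.

response = text_to_speech.delete_user_data(customer_id='{customer_id}')
# The service acknowledges the deletion with a 200-level status and no body.
print(response.get_status_code())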
+ """ + + DE_DE_BIRGITV3VOICE = 'de-DE_BirgitV3Voice' + DE_DE_DIETERV3VOICE = 'de-DE_DieterV3Voice' + DE_DE_ERIKAV3VOICE = 'de-DE_ErikaV3Voice' + EN_AU_HEIDIEXPRESSIVE = 'en-AU_HeidiExpressive' + EN_AU_JACKEXPRESSIVE = 'en-AU_JackExpressive' + EN_CA_HANNAHNATURAL = 'en-CA_HannahNatural' + EN_GB_CHARLOTTEV3VOICE = 'en-GB_CharlotteV3Voice' + EN_GB_CHLOENATURAL = 'en-GB_ChloeNatural' + EN_GB_GEORGEEXPRESSIVE = 'en-GB_GeorgeExpressive' + EN_GB_JAMESV3VOICE = 'en-GB_JamesV3Voice' + EN_GB_GEORGENATURAL = 'en-GB_GeorgeNatural' + EN_GB_KATEV3VOICE = 'en-GB_KateV3Voice' + EN_US_ALLISONEXPRESSIVE = 'en-US_AllisonExpressive' + EN_US_ALLISONV3VOICE = 'en-US_AllisonV3Voice' + EN_US_ELLIENATURAL = 'en-US_EllieNatural' + EN_US_EMILYV3VOICE = 'en-US_EmilyV3Voice' + EN_US_EMMAEXPRESSIVE = 'en-US_EmmaExpressive' + EN_US_EMMANATURAL = 'en-US_EmmaNatural' + EN_US_ETHANNATURAL = 'en-US_EthanNatural' + EN_US_HENRYV3VOICE = 'en-US_HenryV3Voice' + EN_US_JACKSONNATURAL = 'en-US_JacksonNatural' + EN_US_KEVINV3VOICE = 'en-US_KevinV3Voice' + EN_US_LISAEXPRESSIVE = 'en-US_LisaExpressive' + EN_US_LISAV3VOICE = 'en-US_LisaV3Voice' + EN_US_MICHAELEXPRESSIVE = 'en-US_MichaelExpressive' + EN_US_MICHAELV3VOICE = 'en-US_MichaelV3Voice' + EN_US_OLIVIAV3VOICE = 'en-US_OliviaV3Voice' + EN_US_VICTORIANATURAL = 'en-US_VictoriaNatural' + ES_ES_ENRIQUEV3VOICE = 'es-ES_EnriqueV3Voice' + ES_ES_LAURAV3VOICE = 'es-ES_LauraV3Voice' + ES_LA_DANIELAEXPRESSIVE = 'es-LA_DanielaExpressive' + ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice' + ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice' + FR_CA_LOUISEV3VOICE = 'fr-CA_LouiseV3Voice' + FR_FR_NICOLASV3VOICE = 'fr-FR_NicolasV3Voice' + FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice' + IT_IT_FRANCESCAV3VOICE = 'it-IT_FrancescaV3Voice' + JA_JP_EMIV3VOICE = 'ja-JP_EmiV3Voice' + KO_KR_JINV3VOICE = 'ko-KR_JinV3Voice' + NL_NL_MERELV3VOICE = 'nl-NL_MerelV3Voice' + PT_BR_CAMILANATURAL = 'pt-BR_CamilaNatural' + PT_BR_ISABELAV3VOICE = 'pt-BR_IsabelaV3Voice' + PT_BR_LUCASEXPRESSIVE = 'pt-BR_LucasExpressive' + PT_BR_LUCASNATURAL = 'pt-BR_LucasNatural' + + +class SynthesizeEnums: + """ + Enums for synthesize parameters. + """ + + class Accept(str, Enum): + """ + The requested format (MIME type) of the audio. You can use the `Accept` header or + the `accept` parameter to specify the audio format. For more information about + specifying an audio format, see **Audio formats (accept types)** in the method + description. + """ + + AUDIO_ALAW = 'audio/alaw' + AUDIO_BASIC = 'audio/basic' + AUDIO_FLAC = 'audio/flac' + AUDIO_L16 = 'audio/l16' + AUDIO_OGG = 'audio/ogg' + AUDIO_OGG_CODECS_OPUS = 'audio/ogg;codecs=opus' + AUDIO_OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis' + AUDIO_MP3 = 'audio/mp3' + AUDIO_MPEG = 'audio/mpeg' + AUDIO_MULAW = 'audio/mulaw' + AUDIO_WAV = 'audio/wav' + AUDIO_WEBM = 'audio/webm' + AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus' + AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis' + + class Voice(str, Enum): + """ + The voice to use for speech synthesis. If you omit the `voice` parameter, the + service uses the US English `en-US_MichaelV3Voice` by default. + _For IBM Cloud Pak for Data,_ if you do not install the `en-US_MichaelV3Voice`, + you must either specify a voice with the request or specify a new default voice + for your installation of the service. + **See also:** + * [Languages and + voices](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices) + * [Using the default + voice](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices-use#specify-voice-default). 
+ """ + + DE_DE_BIRGITV3VOICE = 'de-DE_BirgitV3Voice' + DE_DE_DIETERV3VOICE = 'de-DE_DieterV3Voice' + DE_DE_ERIKAV3VOICE = 'de-DE_ErikaV3Voice' + EN_AU_HEIDIEXPRESSIVE = 'en-AU_HeidiExpressive' + EN_AU_JACKEXPRESSIVE = 'en-AU_JackExpressive' + EN_CA_HANNAHNATURAL = 'en-CA_HannahNatural' + EN_GB_CHARLOTTEV3VOICE = 'en-GB_CharlotteV3Voice' + EN_GB_CHLOENATURAL = 'en-GB_ChloeNatural' + EN_GB_GEORGEEXPRESSIVE = 'en-GB_GeorgeExpressive' + EN_GB_JAMESV3VOICE = 'en-GB_JamesV3Voice' + EN_GB_GEORGENATURAL = 'en-GB_GeorgeNatural' + EN_GB_KATEV3VOICE = 'en-GB_KateV3Voice' + EN_US_ALLISONEXPRESSIVE = 'en-US_AllisonExpressive' + EN_US_ALLISONV3VOICE = 'en-US_AllisonV3Voice' + EN_US_ELLIENATURAL = 'en-US_EllieNatural' + EN_US_EMILYV3VOICE = 'en-US_EmilyV3Voice' + EN_US_EMMAEXPRESSIVE = 'en-US_EmmaExpressive' + EN_US_EMMANATURAL = 'en-US_EmmaNatural' + EN_US_ETHANNATURAL = 'en-US_EthanNatural' + EN_US_HENRYV3VOICE = 'en-US_HenryV3Voice' + EN_US_JACKSONNATURAL = 'en-US_JacksonNatural' + EN_US_KEVINV3VOICE = 'en-US_KevinV3Voice' + EN_US_LISAEXPRESSIVE = 'en-US_LisaExpressive' + EN_US_LISAV3VOICE = 'en-US_LisaV3Voice' + EN_US_MICHAELEXPRESSIVE = 'en-US_MichaelExpressive' + EN_US_MICHAELV3VOICE = 'en-US_MichaelV3Voice' + EN_US_OLIVIAV3VOICE = 'en-US_OliviaV3Voice' + EN_US_VICTORIANATURAL = 'en-US_VictoriaNatural' + ES_ES_ENRIQUEV3VOICE = 'es-ES_EnriqueV3Voice' + ES_ES_LAURAV3VOICE = 'es-ES_LauraV3Voice' + ES_LA_DANIELAEXPRESSIVE = 'es-LA_DanielaExpressive' + ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice' + ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice' + FR_CA_LOUISEV3VOICE = 'fr-CA_LouiseV3Voice' + FR_FR_NICOLASV3VOICE = 'fr-FR_NicolasV3Voice' + FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice' + IT_IT_FRANCESCAV3VOICE = 'it-IT_FrancescaV3Voice' + JA_JP_EMIV3VOICE = 'ja-JP_EmiV3Voice' + KO_KR_JINV3VOICE = 'ko-KR_JinV3Voice' + NL_NL_MERELV3VOICE = 'nl-NL_MerelV3Voice' + PT_BR_CAMILANATURAL = 'pt-BR_CamilaNatural' + PT_BR_ISABELAV3VOICE = 'pt-BR_IsabelaV3Voice' + PT_BR_LUCASEXPRESSIVE = 'pt-BR_LucasExpressive' + PT_BR_LUCASNATURAL = 'pt-BR_LucasNatural' + + class SpellOutMode(str, Enum): + """ + *For German voices,* indicates how the service is to spell out strings of + individual letters. To indicate the pace of the spelling, specify one of the + following values: + * `default` - The service reads the characters at the rate at which it synthesizes + speech for the request. You can also omit the parameter entirely to achieve the + default behavior. + * `singles` - The service reads the characters one at a time, with a brief pause + between each character. + * `pairs` - The service reads the characters two at a time, with a brief pause + between each pair. + * `triples` - The service reads the characters three at a time, with a brief pause + between each triplet. + For more information, see [Specifying how strings are spelled + out](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-synthesis-params#params-spell-out-mode). + """ + + DEFAULT = 'default' + SINGLES = 'singles' + PAIRS = 'pairs' + TRIPLES = 'triples' + + +class GetPronunciationEnums: + """ + Enums for get_pronunciation parameters. + """ + + class Voice(str, Enum): + """ + A voice that specifies the language in which the pronunciation is to be returned. + If you omit the `voice` parameter, the service uses the US English + `en-US_MichaelV3Voice` by default. All voices for the same language (for example, + `en-US`) return the same translation. 
+ _For IBM Cloud Pak for Data,_ if you do not install the `en-US_MichaelV3Voice`, + you must either specify a voice with the request or specify a new default voice + for your installation of the service. + **See also:** [Using the default + voice](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices-use#specify-voice-default). + """ + + DE_DE_BIRGITV3VOICE = 'de-DE_BirgitV3Voice' + DE_DE_DIETERV3VOICE = 'de-DE_DieterV3Voice' + DE_DE_ERIKAV3VOICE = 'de-DE_ErikaV3Voice' + EN_AU_HEIDIEXPRESSIVE = 'en-AU_HeidiExpressive' + EN_AU_JACKEXPRESSIVE = 'en-AU_JackExpressive' + EN_CA_HANNAHNATURAL = 'en-CA_HannahNatural' + EN_GB_CHARLOTTEV3VOICE = 'en-GB_CharlotteV3Voice' + EN_GB_CHLOENATURAL = 'en-GB_ChloeNatural' + EN_GB_GEORGEEXPRESSIVE = 'en-GB_GeorgeExpressive' + EN_GB_JAMESV3VOICE = 'en-GB_JamesV3Voice' + EN_GB_GEORGENATURAL = 'en-GB_GeorgeNatural' + EN_GB_KATEV3VOICE = 'en-GB_KateV3Voice' + EN_US_ALLISONEXPRESSIVE = 'en-US_AllisonExpressive' + EN_US_ALLISONV3VOICE = 'en-US_AllisonV3Voice' + EN_US_ELLIENATURAL = 'en-US_EllieNatural' + EN_US_EMILYV3VOICE = 'en-US_EmilyV3Voice' + EN_US_EMMAEXPRESSIVE = 'en-US_EmmaExpressive' + EN_US_EMMANATURAL = 'en-US_EmmaNatural' + EN_US_ETHANNATURAL = 'en-US_EthanNatural' + EN_US_HENRYV3VOICE = 'en-US_HenryV3Voice' + EN_US_JACKSONNATURAL = 'en-US_JacksonNatural' + EN_US_KEVINV3VOICE = 'en-US_KevinV3Voice' + EN_US_LISAEXPRESSIVE = 'en-US_LisaExpressive' + EN_US_LISAV3VOICE = 'en-US_LisaV3Voice' + EN_US_MICHAELEXPRESSIVE = 'en-US_MichaelExpressive' + EN_US_MICHAELV3VOICE = 'en-US_MichaelV3Voice' + EN_US_OLIVIAV3VOICE = 'en-US_OliviaV3Voice' + EN_US_VICTORIANATURAL = 'en-US_VictoriaNatural' + ES_ES_ENRIQUEV3VOICE = 'es-ES_EnriqueV3Voice' + ES_ES_LAURAV3VOICE = 'es-ES_LauraV3Voice' + ES_LA_DANIELAEXPRESSIVE = 'es-LA_DanielaExpressive' + ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice' + ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice' + FR_CA_LOUISEV3VOICE = 'fr-CA_LouiseV3Voice' + FR_FR_NICOLASV3VOICE = 'fr-FR_NicolasV3Voice' + FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice' + IT_IT_FRANCESCAV3VOICE = 'it-IT_FrancescaV3Voice' + JA_JP_EMIV3VOICE = 'ja-JP_EmiV3Voice' + KO_KR_JINV3VOICE = 'ko-KR_JinV3Voice' + NL_NL_MERELV3VOICE = 'nl-NL_MerelV3Voice' + PT_BR_CAMILANATURAL = 'pt-BR_CamilaNatural' + PT_BR_ISABELAV3VOICE = 'pt-BR_IsabelaV3Voice' + PT_BR_LUCASEXPRESSIVE = 'pt-BR_LucasExpressive' + PT_BR_LUCASNATURAL = 'pt-BR_LucasNatural' + + class Format(str, Enum): + """ + The phoneme format in which to return the pronunciation. Omit the parameter to + obtain the pronunciation in the default format. + """ + + IBM = 'ibm' + IPA = 'ipa' + + +class ListCustomModelsEnums: + """ + Enums for list_custom_models parameters. + """ + + class Language(str, Enum): + """ + The language for which custom models that are owned by the requesting credentials + are to be returned. Omit the parameter to see all custom models that are owned by + the requester. + """ + + DE_DE = 'de-DE' + EN_AU = 'en-AU' + EN_GB = 'en-GB' + EN_US = 'en-US' + ES_ES = 'es-ES' + ES_LA = 'es-LA' + ES_US = 'es-US' + FR_CA = 'fr-CA' + FR_FR = 'fr-FR' + IT_IT = 'it-IT' + JA_JP = 'ja-JP' + NL_NL = 'nl-NL' + PT_BR = 'pt-BR' + + +############################################################################## +# Models +############################################################################## + + +class CustomModel: + """ + Information about an existing custom model. + + :param str customization_id: The customization ID (GUID) of the custom model. 
+ The [Create a custom model](#createcustommodel) method returns only this field. + It does not not return the other fields of this object. + :param str name: (optional) The name of the custom model. + :param str language: (optional) The language identifier of the custom model (for + example, `en-US`). + :param str owner: (optional) The GUID of the credentials for the instance of the + service that owns the custom model. + :param str created: (optional) The date and time in Coordinated Universal Time + (UTC) at which the custom model was created. The value is provided in full ISO + 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str last_modified: (optional) The date and time in Coordinated Universal + Time (UTC) at which the custom model was last modified. The `created` and + `updated` fields are equal when a model is first added but has yet to be + updated. The value is provided in full ISO 8601 format + (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str description: (optional) The description of the custom model. + :param List[Word] words: (optional) An array of `Word` objects that lists the + words and their translations from the custom model. The words are listed in + alphabetical order, with uppercase letters listed before lowercase letters. The + array is empty if no words are defined for the custom model. This field is + returned only by the [Get a custom model](#getcustommodel) method. + :param List[Prompt] prompts: (optional) An array of `Prompt` objects that + provides information about the prompts that are defined for the specified custom + model. The array is empty if no prompts are defined for the custom model. This + field is returned only by the [Get a custom model](#getcustommodel) method. + """ + + def __init__( + self, + customization_id: str, + *, + name: Optional[str] = None, + language: Optional[str] = None, + owner: Optional[str] = None, + created: Optional[str] = None, + last_modified: Optional[str] = None, + description: Optional[str] = None, + words: Optional[List['Word']] = None, + prompts: Optional[List['Prompt']] = None, + ) -> None: + """ + Initialize a CustomModel object. + + :param str customization_id: The customization ID (GUID) of the custom + model. The [Create a custom model](#createcustommodel) method returns only + this field. It does not not return the other fields of this object. + :param str name: (optional) The name of the custom model. + :param str language: (optional) The language identifier of the custom model + (for example, `en-US`). + :param str owner: (optional) The GUID of the credentials for the instance + of the service that owns the custom model. + :param str created: (optional) The date and time in Coordinated Universal + Time (UTC) at which the custom model was created. The value is provided in + full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str last_modified: (optional) The date and time in Coordinated + Universal Time (UTC) at which the custom model was last modified. The + `created` and `updated` fields are equal when a model is first added but + has yet to be updated. The value is provided in full ISO 8601 format + (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str description: (optional) The description of the custom model. + :param List[Word] words: (optional) An array of `Word` objects that lists + the words and their translations from the custom model. The words are + listed in alphabetical order, with uppercase letters listed before + lowercase letters. The array is empty if no words are defined for the + custom model. 
This field is returned only by the [Get a custom + model](#getcustommodel) method. + :param List[Prompt] prompts: (optional) An array of `Prompt` objects that + provides information about the prompts that are defined for the specified + custom model. The array is empty if no prompts are defined for the custom + model. This field is returned only by the [Get a custom + model](#getcustommodel) method. + """ + self.customization_id = customization_id + self.name = name + self.language = language + self.owner = owner + self.created = created + self.last_modified = last_modified + self.description = description + self.words = words + self.prompts = prompts + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CustomModel': + """Initialize a CustomModel object from a json dictionary.""" + args = {} + if (customization_id := _dict.get('customization_id')) is not None: + args['customization_id'] = customization_id + else: + raise ValueError( + 'Required property \'customization_id\' not present in CustomModel JSON' + ) + if (name := _dict.get('name')) is not None: + args['name'] = name + if (language := _dict.get('language')) is not None: + args['language'] = language + if (owner := _dict.get('owner')) is not None: + args['owner'] = owner + if (created := _dict.get('created')) is not None: + args['created'] = created + if (last_modified := _dict.get('last_modified')) is not None: + args['last_modified'] = last_modified + if (description := _dict.get('description')) is not None: + args['description'] = description + if (words := _dict.get('words')) is not None: + args['words'] = [Word.from_dict(v) for v in words] + if (prompts := _dict.get('prompts')) is not None: + args['prompts'] = [Prompt.from_dict(v) for v in prompts] + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CustomModel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'customization_id') and self.customization_id is not None: + _dict['customization_id'] = self.customization_id + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'owner') and self.owner is not None: + _dict['owner'] = self.owner + if hasattr(self, 'created') and self.created is not None: + _dict['created'] = self.created + if hasattr(self, 'last_modified') and self.last_modified is not None: + _dict['last_modified'] = self.last_modified + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'words') and self.words is not None: + words_list = [] + for v in self.words: + if isinstance(v, dict): + words_list.append(v) + else: + words_list.append(v.to_dict()) + _dict['words'] = words_list + if hasattr(self, 'prompts') and self.prompts is not None: + prompts_list = [] + for v in self.prompts: + if isinstance(v, dict): + prompts_list.append(v) + else: + prompts_list.append(v.to_dict()) + _dict['prompts'] = prompts_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CustomModel object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CustomModel') -> bool: + """Return `true` when self and other are equal, false 
otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CustomModel') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CustomModels: + """ + Information about existing custom models. + + :param List[CustomModel] customizations: An array of `CustomModel` objects that + provides information about each available custom model. The array is empty if + the requesting credentials own no custom models (if no language is specified) or + own no custom models for the specified language. + """ + + def __init__( + self, + customizations: List['CustomModel'], + ) -> None: + """ + Initialize a CustomModels object. + + :param List[CustomModel] customizations: An array of `CustomModel` objects + that provides information about each available custom model. The array is + empty if the requesting credentials own no custom models (if no language is + specified) or own no custom models for the specified language. + """ + self.customizations = customizations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'CustomModels': + """Initialize a CustomModels object from a json dictionary.""" + args = {} + if (customizations := _dict.get('customizations')) is not None: + args['customizations'] = [ + CustomModel.from_dict(v) for v in customizations + ] + else: + raise ValueError( + 'Required property \'customizations\' not present in CustomModels JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CustomModels object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'customizations') and self.customizations is not None: + customizations_list = [] + for v in self.customizations: + if isinstance(v, dict): + customizations_list.append(v) + else: + customizations_list.append(v.to_dict()) + _dict['customizations'] = customizations_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this CustomModels object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'CustomModels') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'CustomModels') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Prompt: + """ + Information about a custom prompt. + + :param str prompt: The user-specified text of the prompt. + :param str prompt_id: The user-specified identifier (name) of the prompt. + :param str status: The status of the prompt: + * `processing`: The service received the request to add the prompt and is + analyzing the validity of the prompt. + * `available`: The service successfully validated the prompt, which is now ready + for use in a speech synthesis request. + * `failed`: The service's validation of the prompt failed. The status of the + prompt includes an `error` field that describes the reason for the failure. + :param str error: (optional) If the status of the prompt is `failed`, an error + message that describes the reason for the failure. The field is omitted if no + error occurred. 
+ :param str speaker_id: (optional) The speaker ID (GUID) of the speaker for which + the prompt was defined. The field is omitted if no speaker ID was specified. + """ + + def __init__( + self, + prompt: str, + prompt_id: str, + status: str, + *, + error: Optional[str] = None, + speaker_id: Optional[str] = None, + ) -> None: + """ + Initialize a Prompt object. + + :param str prompt: The user-specified text of the prompt. + :param str prompt_id: The user-specified identifier (name) of the prompt. + :param str status: The status of the prompt: + * `processing`: The service received the request to add the prompt and is + analyzing the validity of the prompt. + * `available`: The service successfully validated the prompt, which is now + ready for use in a speech synthesis request. + * `failed`: The service's validation of the prompt failed. The status of + the prompt includes an `error` field that describes the reason for the + failure. + :param str error: (optional) If the status of the prompt is `failed`, an + error message that describes the reason for the failure. The field is + omitted if no error occurred. + :param str speaker_id: (optional) The speaker ID (GUID) of the speaker for + which the prompt was defined. The field is omitted if no speaker ID was + specified. + """ + self.prompt = prompt + self.prompt_id = prompt_id + self.status = status + self.error = error + self.speaker_id = speaker_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Prompt': + """Initialize a Prompt object from a json dictionary.""" + args = {} + if (prompt := _dict.get('prompt')) is not None: + args['prompt'] = prompt + else: + raise ValueError( + 'Required property \'prompt\' not present in Prompt JSON') + if (prompt_id := _dict.get('prompt_id')) is not None: + args['prompt_id'] = prompt_id + else: + raise ValueError( + 'Required property \'prompt_id\' not present in Prompt JSON') + if (status := _dict.get('status')) is not None: + args['status'] = status + else: + raise ValueError( + 'Required property \'status\' not present in Prompt JSON') + if (error := _dict.get('error')) is not None: + args['error'] = error + if (speaker_id := _dict.get('speaker_id')) is not None: + args['speaker_id'] = speaker_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Prompt object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'prompt') and self.prompt is not None: + _dict['prompt'] = self.prompt + if hasattr(self, 'prompt_id') and self.prompt_id is not None: + _dict['prompt_id'] = self.prompt_id + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'error') and self.error is not None: + _dict['error'] = self.error + if hasattr(self, 'speaker_id') and self.speaker_id is not None: + _dict['speaker_id'] = self.speaker_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Prompt object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Prompt') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Prompt') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" 
+ return not self == other + + +class PromptMetadata: + """ + Information about the prompt that is to be added to a custom model. The following + example of a `PromptMetadata` object includes both the required prompt text and an + optional speaker model ID: + `{ "prompt_text": "Thank you and good-bye!", "speaker_id": + "823068b2-ed4e-11ea-b6e0-7b6456aa95cc" }`. + + :param str prompt_text: The required written text of the spoken prompt. The + length of a prompt's text is limited to a few sentences. Speaking one or two + sentences of text is the recommended limit. A prompt cannot contain more than + 1000 characters of text. Escape any XML control characters (double quotes, + single quotes, ampersands, angle brackets, and slashes) that appear in the text + of the prompt. + :param str speaker_id: (optional) The optional speaker ID (GUID) of a previously + defined speaker model that is to be associated with the prompt. + """ + + def __init__( + self, + prompt_text: str, + *, + speaker_id: Optional[str] = None, + ) -> None: + """ + Initialize a PromptMetadata object. + + :param str prompt_text: The required written text of the spoken prompt. The + length of a prompt's text is limited to a few sentences. Speaking one or + two sentences of text is the recommended limit. A prompt cannot contain + more than 1000 characters of text. Escape any XML control characters + (double quotes, single quotes, ampersands, angle brackets, and slashes) + that appear in the text of the prompt. + :param str speaker_id: (optional) The optional speaker ID (GUID) of a + previously defined speaker model that is to be associated with the prompt. + """ + self.prompt_text = prompt_text + self.speaker_id = speaker_id + + @classmethod + def from_dict(cls, _dict: Dict) -> 'PromptMetadata': + """Initialize a PromptMetadata object from a json dictionary.""" + args = {} + if (prompt_text := _dict.get('prompt_text')) is not None: + args['prompt_text'] = prompt_text + else: + raise ValueError( + 'Required property \'prompt_text\' not present in PromptMetadata JSON' + ) + if (speaker_id := _dict.get('speaker_id')) is not None: + args['speaker_id'] = speaker_id + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a PromptMetadata object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'prompt_text') and self.prompt_text is not None: + _dict['prompt_text'] = self.prompt_text + if hasattr(self, 'speaker_id') and self.speaker_id is not None: + _dict['speaker_id'] = self.speaker_id + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this PromptMetadata object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'PromptMetadata') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'PromptMetadata') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Prompts: + """ + Information about the custom prompts that are defined for a custom model. + + :param List[Prompt] prompts: An array of `Prompt` objects that provides + information about the prompts that are defined for the specified custom model. 
+ The array is empty if no prompts are defined for the custom model. + """ + + def __init__( + self, + prompts: List['Prompt'], + ) -> None: + """ + Initialize a Prompts object. + + :param List[Prompt] prompts: An array of `Prompt` objects that provides + information about the prompts that are defined for the specified custom + model. The array is empty if no prompts are defined for the custom model. + """ + self.prompts = prompts + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Prompts': + """Initialize a Prompts object from a json dictionary.""" + args = {} + if (prompts := _dict.get('prompts')) is not None: + args['prompts'] = [Prompt.from_dict(v) for v in prompts] + else: + raise ValueError( + 'Required property \'prompts\' not present in Prompts JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Prompts object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'prompts') and self.prompts is not None: + prompts_list = [] + for v in self.prompts: + if isinstance(v, dict): + prompts_list.append(v) + else: + prompts_list.append(v.to_dict()) + _dict['prompts'] = prompts_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Prompts object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Prompts') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Prompts') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Pronunciation: + """ + The pronunciation of the specified text. + + :param str pronunciation: The pronunciation of the specified text in the + requested voice and format. If a custom model is specified, the pronunciation + also reflects that custom model. + """ + + def __init__( + self, + pronunciation: str, + ) -> None: + """ + Initialize a Pronunciation object. + + :param str pronunciation: The pronunciation of the specified text in the + requested voice and format. If a custom model is specified, the + pronunciation also reflects that custom model. 
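# --- Editor's note: illustrative usage sketch, not part of this patch ---
# A minimal example of how the Pronunciation model above is consumed. The
# client setup and the get_pronunciation() call (with its `voice` and
# `format` parameters) are assumptions drawn from the wider SDK surface,
# not from this hunk; only Pronunciation.from_dict() is defined here.
from ibm_watson import TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

service = TextToSpeechV1(authenticator=IAMAuthenticator('{apikey}'))
service.set_service_url('{url}')

response = service.get_pronunciation(
    text='IEEE',
    voice='en-US_AllisonV3Voice',
    format='ipa',  # see GetPronunciationEnums.Format above ('ibm' or 'ipa')
).get_result()  # a dict like {'pronunciation': '...'}

pron = Pronunciation.from_dict(response)
print(pron.pronunciation)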
+ """ + self.pronunciation = pronunciation + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Pronunciation': + """Initialize a Pronunciation object from a json dictionary.""" + args = {} + if (pronunciation := _dict.get('pronunciation')) is not None: + args['pronunciation'] = pronunciation + else: + raise ValueError( + 'Required property \'pronunciation\' not present in Pronunciation JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Pronunciation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'pronunciation') and self.pronunciation is not None: + _dict['pronunciation'] = self.pronunciation + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Pronunciation object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Pronunciation') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Pronunciation') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Speaker: + """ + Information about a speaker model. + + :param str speaker_id: The speaker ID (GUID) of the speaker. + :param str name: The user-defined name of the speaker. + """ + + def __init__( + self, + speaker_id: str, + name: str, + ) -> None: + """ + Initialize a Speaker object. + + :param str speaker_id: The speaker ID (GUID) of the speaker. + :param str name: The user-defined name of the speaker. + """ + self.speaker_id = speaker_id + self.name = name + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Speaker': + """Initialize a Speaker object from a json dictionary.""" + args = {} + if (speaker_id := _dict.get('speaker_id')) is not None: + args['speaker_id'] = speaker_id + else: + raise ValueError( + 'Required property \'speaker_id\' not present in Speaker JSON') + if (name := _dict.get('name')) is not None: + args['name'] = name + else: + raise ValueError( + 'Required property \'name\' not present in Speaker JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Speaker object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'speaker_id') and self.speaker_id is not None: + _dict['speaker_id'] = self.speaker_id + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Speaker object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'Speaker') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Speaker') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SpeakerCustomModel: + """ + A custom models for which the speaker has defined prompts. 
+ + :param str customization_id: The customization ID (GUID) of a custom model for + which the speaker has defined one or more prompts. + :param List[SpeakerPrompt] prompts: An array of `SpeakerPrompt` objects that + provides information about each prompt that the user has defined for the custom + model. + """ + + def __init__( + self, + customization_id: str, + prompts: List['SpeakerPrompt'], + ) -> None: + """ + Initialize a SpeakerCustomModel object. + + :param str customization_id: The customization ID (GUID) of a custom model + for which the speaker has defined one or more prompts. + :param List[SpeakerPrompt] prompts: An array of `SpeakerPrompt` objects + that provides information about each prompt that the user has defined for + the custom model. + """ + self.customization_id = customization_id + self.prompts = prompts + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SpeakerCustomModel': + """Initialize a SpeakerCustomModel object from a json dictionary.""" + args = {} + if (customization_id := _dict.get('customization_id')) is not None: + args['customization_id'] = customization_id + else: + raise ValueError( + 'Required property \'customization_id\' not present in SpeakerCustomModel JSON' + ) + if (prompts := _dict.get('prompts')) is not None: + args['prompts'] = [SpeakerPrompt.from_dict(v) for v in prompts] + else: + raise ValueError( + 'Required property \'prompts\' not present in SpeakerCustomModel JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeakerCustomModel object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'customization_id') and self.customization_id is not None: + _dict['customization_id'] = self.customization_id + if hasattr(self, 'prompts') and self.prompts is not None: + prompts_list = [] + for v in self.prompts: + if isinstance(v, dict): + prompts_list.append(v) + else: + prompts_list.append(v.to_dict()) + _dict['prompts'] = prompts_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SpeakerCustomModel object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SpeakerCustomModel') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SpeakerCustomModel') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SpeakerCustomModels: + """ + Custom models for which the speaker has defined prompts. + + :param List[SpeakerCustomModel] customizations: An array of `SpeakerCustomModel` + objects. Each object provides information about the prompts that are defined for + a specified speaker in the custom models that are owned by a specified service + instance. The array is empty if no prompts are defined for the speaker. + """ + + def __init__( + self, + customizations: List['SpeakerCustomModel'], + ) -> None: + """ + Initialize a SpeakerCustomModels object. + + :param List[SpeakerCustomModel] customizations: An array of + `SpeakerCustomModel` objects. 
Each object provides information about the + prompts that are defined for a specified speaker in the custom models that + are owned by a specified service instance. The array is empty if no prompts + are defined for the speaker. + """ + self.customizations = customizations + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SpeakerCustomModels': + """Initialize a SpeakerCustomModels object from a json dictionary.""" + args = {} + if (customizations := _dict.get('customizations')) is not None: + args['customizations'] = [ + SpeakerCustomModel.from_dict(v) for v in customizations + ] + else: + raise ValueError( + 'Required property \'customizations\' not present in SpeakerCustomModels JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeakerCustomModels object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'customizations') and self.customizations is not None: + customizations_list = [] + for v in self.customizations: + if isinstance(v, dict): + customizations_list.append(v) + else: + customizations_list.append(v.to_dict()) + _dict['customizations'] = customizations_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SpeakerCustomModels object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SpeakerCustomModels') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SpeakerCustomModels') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + - Deletes all data that is associated with a specified customer ID. The method - deletes all data for the customer ID, regardless of the method by which the - information was added. The method has no effect if no data is associated with the - customer ID. You must issue the request with credentials for the same instance of - the service that was used to associate the customer ID with the data. - You associate a customer ID with data by passing the `X-Watson-Metadata` header - with a request that passes the data. - **See also:** [Information - security](https://cloud.ibm.com/docs/services/text-to-speech/information-security.html). +class SpeakerModel: + """ + The speaker ID of the speaker model. - :param str customer_id: The customer ID for which all data is to be deleted. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse + :param str speaker_id: The speaker ID (GUID) of the speaker model. + """ + + def __init__( + self, + speaker_id: str, + ) -> None: """ + Initialize a SpeakerModel object. - if customer_id is None: - raise ValueError('customer_id must be provided') + :param str speaker_id: The speaker ID (GUID) of the speaker model. 
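# --- Editor's note: illustrative usage sketch, not part of this patch ---
# SpeakerModel wraps the single speaker_id that comes back when a speaker is
# created. The create_speaker_model() call is an assumption based on the wider
# SDK and is not part of this hunk; `service` is the client configured in the
# earlier sketch. The round trip below relies only on the model code shown here.
with open('speaker_sample.wav', 'rb') as audio_file:
    result = service.create_speaker_model(
        speaker_name='Annie',
        audio=audio_file,
    ).get_result()  # a dict like {'speaker_id': '...'}

speaker_model = SpeakerModel.from_dict(result)
print(speaker_model.speaker_id)
# from_dict()/to_dict() are symmetric, so a round trip preserves equality.
assert SpeakerModel.from_dict(speaker_model.to_dict()) == speaker_model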
+ """ + self.speaker_id = speaker_id - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('text_to_speech', 'V1', - 'delete_user_data') - headers.update(sdk_headers) + @classmethod + def from_dict(cls, _dict: Dict) -> 'SpeakerModel': + """Initialize a SpeakerModel object from a json dictionary.""" + args = {} + if (speaker_id := _dict.get('speaker_id')) is not None: + args['speaker_id'] = speaker_id + else: + raise ValueError( + 'Required property \'speaker_id\' not present in SpeakerModel JSON' + ) + return cls(**args) - params = {'customer_id': customer_id} + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeakerModel object from a json dictionary.""" + return cls.from_dict(_dict) - url = '/v1/user_data' - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=False) - return response + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'speaker_id') and self.speaker_id is not None: + _dict['speaker_id'] = self.speaker_id + return _dict + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() -############################################################################## -# Models -############################################################################## + def __str__(self) -> str: + """Return a `str` version of this SpeakerModel object.""" + return json.dumps(self.to_dict(), indent=2) + def __eq__(self, other: 'SpeakerModel') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SpeakerModel') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other -class Pronunciation(object): - """ - Pronunciation. - :attr str pronunciation: The pronunciation of the specified text in the requested - voice and format. If a custom voice model is specified, the pronunciation also - reflects that custom voice. +class SpeakerPrompt: + """ + A prompt that a speaker has defined for a custom model. + + :param str prompt: The user-specified text of the prompt. + :param str prompt_id: The user-specified identifier (name) of the prompt. + :param str status: The status of the prompt: + * `processing`: The service received the request to add the prompt and is + analyzing the validity of the prompt. + * `available`: The service successfully validated the prompt, which is now ready + for use in a speech synthesis request. + * `failed`: The service's validation of the prompt failed. The status of the + prompt includes an `error` field that describes the reason for the failure. + :param str error: (optional) If the status of the prompt is `failed`, an error + message that describes the reason for the failure. The field is omitted if no + error occurred. """ - def __init__(self, pronunciation): + def __init__( + self, + prompt: str, + prompt_id: str, + status: str, + *, + error: Optional[str] = None, + ) -> None: """ - Initialize a Pronunciation object. - - :param str pronunciation: The pronunciation of the specified text in the requested - voice and format. If a custom voice model is specified, the pronunciation also - reflects that custom voice. + Initialize a SpeakerPrompt object. + + :param str prompt: The user-specified text of the prompt. 
+ :param str prompt_id: The user-specified identifier (name) of the prompt. + :param str status: The status of the prompt: + * `processing`: The service received the request to add the prompt and is + analyzing the validity of the prompt. + * `available`: The service successfully validated the prompt, which is now + ready for use in a speech synthesis request. + * `failed`: The service's validation of the prompt failed. The status of + the prompt includes an `error` field that describes the reason for the + failure. + :param str error: (optional) If the status of the prompt is `failed`, an + error message that describes the reason for the failure. The field is + omitted if no error occurred. """ - self.pronunciation = pronunciation + self.prompt = prompt + self.prompt_id = prompt_id + self.status = status + self.error = error @classmethod - def _from_dict(cls, _dict): - """Initialize a Pronunciation object from a json dictionary.""" + def from_dict(cls, _dict: Dict) -> 'SpeakerPrompt': + """Initialize a SpeakerPrompt object from a json dictionary.""" args = {} - if 'pronunciation' in _dict: - args['pronunciation'] = _dict.get('pronunciation') + if (prompt := _dict.get('prompt')) is not None: + args['prompt'] = prompt else: raise ValueError( - 'Required property \'pronunciation\' not present in Pronunciation JSON' + 'Required property \'prompt\' not present in SpeakerPrompt JSON' ) + if (prompt_id := _dict.get('prompt_id')) is not None: + args['prompt_id'] = prompt_id + else: + raise ValueError( + 'Required property \'prompt_id\' not present in SpeakerPrompt JSON' + ) + if (status := _dict.get('status')) is not None: + args['status'] = status + else: + raise ValueError( + 'Required property \'status\' not present in SpeakerPrompt JSON' + ) + if (error := _dict.get('error')) is not None: + args['error'] = error return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeakerPrompt object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'pronunciation') and self.pronunciation is not None: - _dict['pronunciation'] = self.pronunciation + if hasattr(self, 'prompt') and self.prompt is not None: + _dict['prompt'] = self.prompt + if hasattr(self, 'prompt_id') and self.prompt_id is not None: + _dict['prompt_id'] = self.prompt_id + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'error') and self.error is not None: + _dict['error'] = self.error return _dict - def __str__(self): - """Return a `str` version of this Pronunciation object.""" - return json.dumps(self._to_dict(), indent=2) + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SpeakerPrompt object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SpeakerPrompt') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SpeakerPrompt') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class SupportedFeatures(object): +class Speakers: """ - Describes the additional service features that are supported with the voice. 
+ Information about all speaker models for the service instance. + + :param List[Speaker] speakers: An array of `Speaker` objects that provides + information about the speakers for the service instance. The array is empty if + the service instance has no speakers. + """ + + def __init__( + self, + speakers: List['Speaker'], + ) -> None: + """ + Initialize a Speakers object. + + :param List[Speaker] speakers: An array of `Speaker` objects that provides + information about the speakers for the service instance. The array is empty + if the service instance has no speakers. + """ + self.speakers = speakers + + @classmethod + def from_dict(cls, _dict: Dict) -> 'Speakers': + """Initialize a Speakers object from a json dictionary.""" + args = {} + if (speakers := _dict.get('speakers')) is not None: + args['speakers'] = [Speaker.from_dict(v) for v in speakers] + else: + raise ValueError( + 'Required property \'speakers\' not present in Speakers JSON') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Speakers object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'speakers') and self.speakers is not None: + speakers_list = [] + for v in self.speakers: + if isinstance(v, dict): + speakers_list.append(v) + else: + speakers_list.append(v.to_dict()) + _dict['speakers'] = speakers_list + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this Speakers object.""" + return json.dumps(self.to_dict(), indent=2) - :attr bool custom_pronunciation: If `true`, the voice can be customized; if `false`, - the voice cannot be customized. (Same as `customizable`.). - :attr bool voice_transformation: If `true`, the voice can be transformed by using the - SSML <voice-transformation> element; if `false`, the voice cannot be - transformed. + def __eq__(self, other: 'Speakers') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'Speakers') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SupportedFeatures: + """ + Additional service features that are supported with the voice. + + :param bool custom_pronunciation: If `true`, the voice can be customized; if + `false`, the voice cannot be customized. (Same as `customizable`.). + :param bool voice_transformation: If `true`, the voice can be transformed by + using the SSML `<voice-transformation>` element; if `false`, the voice cannot be + transformed. **Note:** The SSML `<voice-transformation>` element is obsolete. + You can no longer use the element with any supported voice. """ - def __init__(self, custom_pronunciation, voice_transformation): + def __init__( + self, + custom_pronunciation: bool, + voice_transformation: bool, + ) -> None: """ Initialize a SupportedFeatures object. - :param bool custom_pronunciation: If `true`, the voice can be customized; if - `false`, the voice cannot be customized. (Same as `customizable`.). - :param bool voice_transformation: If `true`, the voice can be transformed by using - the SSML <voice-transformation> element; if `false`, the voice cannot be - transformed.
+ :param bool custom_pronunciation: If `true`, the voice can be customized; + if `false`, the voice cannot be customized. (Same as `customizable`.). + :param bool voice_transformation: If `true`, the voice can be transformed + by using the SSML `` element; if `false`, the voice + cannot be transformed. **Note:** The SSML `` element + is obsolete. You can no longer use the element with any supported voice. """ self.custom_pronunciation = custom_pronunciation self.voice_transformation = voice_transformation @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'SupportedFeatures': """Initialize a SupportedFeatures object from a json dictionary.""" args = {} - if 'custom_pronunciation' in _dict: - args['custom_pronunciation'] = _dict.get('custom_pronunciation') + if (custom_pronunciation := + _dict.get('custom_pronunciation')) is not None: + args['custom_pronunciation'] = custom_pronunciation else: raise ValueError( 'Required property \'custom_pronunciation\' not present in SupportedFeatures JSON' ) - if 'voice_transformation' in _dict: - args['voice_transformation'] = _dict.get('voice_transformation') + if (voice_transformation := + _dict.get('voice_transformation')) is not None: + args['voice_transformation'] = voice_transformation else: raise ValueError( 'Required property \'voice_transformation\' not present in SupportedFeatures JSON' ) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a SupportedFeatures object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'custom_pronunciation' @@ -1004,70 +3196,86 @@ def _to_dict(self): _dict['voice_transformation'] = self.voice_transformation return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this SupportedFeatures object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'SupportedFeatures') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'SupportedFeatures') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Translation(object): +class Translation: """ - Translation. - - :attr str translation: The phonetic or sounds-like translation for the word. A - phonetic translation is based on the SSML format for representing the phonetic string - of a word either as an IPA translation or as an IBM SPR translation. A sounds-like is - one or more words that, when combined, sound like the word. - :attr str part_of_speech: (optional) **Japanese only.** The part of speech for the - word. The service uses the value to produce the correct intonation for the word. You - can create only a single entry, with or without a single part of speech, for any word; - you cannot create multiple entries with different parts of speech for the same word. - For more information, see [Working with Japanese - entries](https://cloud.ibm.com/docs/services/text-to-speech/custom-rules.html#jaNotes). + Information about the translation for the specified text. 
+ + :param str translation: The phonetic or sounds-like translation for the word. A + phonetic translation is based on the SSML format for representing the phonetic + string of a word either as an IPA translation or as an IBM SPR translation. A + sounds-like is one or more words that, when combined, sound like the word. + :param str part_of_speech: (optional) **Japanese only.** The part of speech for + the word. The service uses the value to produce the correct intonation for the + word. You can create only a single entry, with or without a single part of + speech, for any word; you cannot create multiple entries with different parts of + speech for the same word. For more information, see [Working with Japanese + entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes). """ - def __init__(self, translation, part_of_speech=None): + def __init__( + self, + translation: str, + *, + part_of_speech: Optional[str] = None, + ) -> None: """ Initialize a Translation object. - :param str translation: The phonetic or sounds-like translation for the word. A - phonetic translation is based on the SSML format for representing the phonetic - string of a word either as an IPA translation or as an IBM SPR translation. A - sounds-like is one or more words that, when combined, sound like the word. - :param str part_of_speech: (optional) **Japanese only.** The part of speech for - the word. The service uses the value to produce the correct intonation for the - word. You can create only a single entry, with or without a single part of speech, - for any word; you cannot create multiple entries with different parts of speech - for the same word. For more information, see [Working with Japanese - entries](https://cloud.ibm.com/docs/services/text-to-speech/custom-rules.html#jaNotes). + :param str translation: The phonetic or sounds-like translation for the + word. A phonetic translation is based on the SSML format for representing + the phonetic string of a word either as an IPA translation or as an IBM SPR + translation. A sounds-like is one or more words that, when combined, sound + like the word. + :param str part_of_speech: (optional) **Japanese only.** The part of speech + for the word. The service uses the value to produce the correct intonation + for the word. You can create only a single entry, with or without a single + part of speech, for any word; you cannot create multiple entries with + different parts of speech for the same word. For more information, see + [Working with Japanese + entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes). 
""" self.translation = translation self.part_of_speech = part_of_speech @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'Translation': """Initialize a Translation object from a json dictionary.""" args = {} - if 'translation' in _dict: - args['translation'] = _dict.get('translation') + if (translation := _dict.get('translation')) is not None: + args['translation'] = translation else: raise ValueError( 'Required property \'translation\' not present in Translation JSON' ) - if 'part_of_speech' in _dict: - args['part_of_speech'] = _dict.get('part_of_speech') + if (part_of_speech := _dict.get('part_of_speech')) is not None: + args['part_of_speech'] = part_of_speech return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Translation object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'translation') and self.translation is not None: @@ -1076,67 +3284,106 @@ def _to_dict(self): _dict['part_of_speech'] = self.part_of_speech return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this Translation object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Translation') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Translation') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class PartOfSpeechEnum(str, Enum): + """ + **Japanese only.** The part of speech for the word. The service uses the value to + produce the correct intonation for the word. You can create only a single entry, + with or without a single part of speech, for any word; you cannot create multiple + entries with different parts of speech for the same word. For more information, + see [Working with Japanese + entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes). + """ -class Voice(object): + DOSI = 'Dosi' + FUKU = 'Fuku' + GOBI = 'Gobi' + HOKA = 'Hoka' + JODO = 'Jodo' + JOSI = 'Josi' + KATO = 'Kato' + KEDO = 'Kedo' + KEYO = 'Keyo' + KIGO = 'Kigo' + KOYU = 'Koyu' + MESI = 'Mesi' + RETA = 'Reta' + STBI = 'Stbi' + STTO = 'Stto' + STZO = 'Stzo' + SUJI = 'Suji' + + +class Voice: """ - Voice. - - :attr str url: The URI of the voice. - :attr str gender: The gender of the voice: `male` or `female`. - :attr str name: The name of the voice. Use this as the voice identifier in all - requests. - :attr str language: The language and region of the voice (for example, `en-US`). - :attr str description: A textual description of the voice. - :attr bool customizable: If `true`, the voice can be customized; if `false`, the voice - cannot be customized. (Same as `custom_pronunciation`; maintained for backward - compatibility.). - :attr SupportedFeatures supported_features: Describes the additional service features - that are supported with the voice. - :attr VoiceModel customization: (optional) Returns information about a specified - custom voice model. 
This field is returned only by the **Get a voice** method and only - when you specify the customization ID of a custom voice model. + Information about an available voice. + + :param str url: The URI of the voice. + :param str gender: The gender of the voice: `male` or `female`. + :param str name: The name of the voice. Use this as the voice identifier in all + requests. + :param str language: The language and region of the voice (for example, + `en-US`). + :param str description: A textual description of the voice. + :param bool customizable: If `true`, the voice can be customized; if `false`, + the voice cannot be customized. (Same as `custom_pronunciation`; maintained for + backward compatibility.). + :param SupportedFeatures supported_features: Additional service features that + are supported with the voice. + :param CustomModel customization: (optional) Returns information about a + specified custom model. This field is returned only by the [Get a + voice](#getvoice) method and only when you specify the customization ID of a + custom model. """ - def __init__(self, - url, - gender, - name, - language, - description, - customizable, - supported_features, - customization=None): + def __init__( + self, + url: str, + gender: str, + name: str, + language: str, + description: str, + customizable: bool, + supported_features: 'SupportedFeatures', + *, + customization: Optional['CustomModel'] = None, + ) -> None: """ Initialize a Voice object. :param str url: The URI of the voice. :param str gender: The gender of the voice: `male` or `female`. - :param str name: The name of the voice. Use this as the voice identifier in all - requests. - :param str language: The language and region of the voice (for example, `en-US`). + :param str name: The name of the voice. Use this as the voice identifier in + all requests. + :param str language: The language and region of the voice (for example, + `en-US`). :param str description: A textual description of the voice. - :param bool customizable: If `true`, the voice can be customized; if `false`, the - voice cannot be customized. (Same as `custom_pronunciation`; maintained for - backward compatibility.). - :param SupportedFeatures supported_features: Describes the additional service - features that are supported with the voice. - :param VoiceModel customization: (optional) Returns information about a specified - custom voice model. This field is returned only by the **Get a voice** method and - only when you specify the customization ID of a custom voice model. + :param bool customizable: If `true`, the voice can be customized; if + `false`, the voice cannot be customized. (Same as `custom_pronunciation`; + maintained for backward compatibility.). + :param SupportedFeatures supported_features: Additional service features + that are supported with the voice. + :param CustomModel customization: (optional) Returns information about a + specified custom model. This field is returned only by the [Get a + voice](#getvoice) method and only when you specify the customization ID of + a custom model. 
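# --- Editor's note: illustrative usage sketch, not part of this patch ---
# Parsing a voice description with the Voice model defined above. The
# list_voices() call is an assumption from the wider SDK; the nested handling
# of SupportedFeatures and the optional CustomModel is what this hunk's
# from_dict()/to_dict() implements. `service` is the client from the first sketch.
voices_json = service.list_voices().get_result()  # {'voices': [...]}
first = Voice.from_dict(voices_json['voices'][0])
print(first.name, first.language, first.gender)
print(first.supported_features.custom_pronunciation)

# to_dict() accepts either model instances or plain dicts in nested fields,
# so serializing and re-parsing yields an equal object.
assert Voice.from_dict(first.to_dict()) == first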
""" self.url = url self.gender = gender @@ -1148,52 +3395,56 @@ def __init__(self, self.customization = customization @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'Voice': """Initialize a Voice object from a json dictionary.""" args = {} - if 'url' in _dict: - args['url'] = _dict.get('url') + if (url := _dict.get('url')) is not None: + args['url'] = url else: raise ValueError( 'Required property \'url\' not present in Voice JSON') - if 'gender' in _dict: - args['gender'] = _dict.get('gender') + if (gender := _dict.get('gender')) is not None: + args['gender'] = gender else: raise ValueError( 'Required property \'gender\' not present in Voice JSON') - if 'name' in _dict: - args['name'] = _dict.get('name') + if (name := _dict.get('name')) is not None: + args['name'] = name else: raise ValueError( 'Required property \'name\' not present in Voice JSON') - if 'language' in _dict: - args['language'] = _dict.get('language') + if (language := _dict.get('language')) is not None: + args['language'] = language else: raise ValueError( 'Required property \'language\' not present in Voice JSON') - if 'description' in _dict: - args['description'] = _dict.get('description') + if (description := _dict.get('description')) is not None: + args['description'] = description else: raise ValueError( 'Required property \'description\' not present in Voice JSON') - if 'customizable' in _dict: - args['customizable'] = _dict.get('customizable') + if (customizable := _dict.get('customizable')) is not None: + args['customizable'] = customizable else: raise ValueError( 'Required property \'customizable\' not present in Voice JSON') - if 'supported_features' in _dict: - args['supported_features'] = SupportedFeatures._from_dict( - _dict.get('supported_features')) + if (supported_features := _dict.get('supported_features')) is not None: + args['supported_features'] = SupportedFeatures.from_dict( + supported_features) else: raise ValueError( 'Required property \'supported_features\' not present in Voice JSON' ) - if 'customization' in _dict: - args['customization'] = VoiceModel._from_dict( - _dict.get('customization')) + if (customization := _dict.get('customization')) is not None: + args['customization'] = CustomModel.from_dict(customization) return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Voice object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'url') and self.url is not None: @@ -1211,326 +3462,174 @@ def _to_dict(self): if hasattr( self, 'supported_features') and self.supported_features is not None: - _dict['supported_features'] = self.supported_features._to_dict() + if isinstance(self.supported_features, dict): + _dict['supported_features'] = self.supported_features + else: + _dict['supported_features'] = self.supported_features.to_dict() if hasattr(self, 'customization') and self.customization is not None: - _dict['customization'] = self.customization._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this Voice object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return 
not self == other - - -class VoiceModel(object): - """ - VoiceModel. - - :attr str customization_id: The customization ID (GUID) of the custom voice model. The - **Create a custom model** method returns only this field. It does not not return the - other fields of this object. - :attr str name: (optional) The name of the custom voice model. - :attr str language: (optional) The language identifier of the custom voice model (for - example, `en-US`). - :attr str owner: (optional) The GUID of the service credentials for the instance of - the service that owns the custom voice model. - :attr str created: (optional) The date and time in Coordinated Universal Time (UTC) at - which the custom voice model was created. The value is provided in full ISO 8601 - format (`YYYY-MM-DDThh:mm:ss.sTZD`). - :attr str last_modified: (optional) The date and time in Coordinated Universal Time - (UTC) at which the custom voice model was last modified. Equals `created` when a new - voice model is first added but has yet to be updated. The value is provided in full - ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). - :attr str description: (optional) The description of the custom voice model. - :attr list[Word] words: (optional) An array of `Word` objects that lists the words and - their translations from the custom voice model. The words are listed in alphabetical - order, with uppercase letters listed before lowercase letters. The array is empty if - the custom model contains no words. This field is returned only by the **Get a voice** - method and only when you specify the customization ID of a custom voice model. - """ - - def __init__(self, - customization_id, - name=None, - language=None, - owner=None, - created=None, - last_modified=None, - description=None, - words=None): - """ - Initialize a VoiceModel object. - - :param str customization_id: The customization ID (GUID) of the custom voice - model. The **Create a custom model** method returns only this field. It does not - not return the other fields of this object. - :param str name: (optional) The name of the custom voice model. - :param str language: (optional) The language identifier of the custom voice model - (for example, `en-US`). - :param str owner: (optional) The GUID of the service credentials for the instance - of the service that owns the custom voice model. - :param str created: (optional) The date and time in Coordinated Universal Time - (UTC) at which the custom voice model was created. The value is provided in full - ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). - :param str last_modified: (optional) The date and time in Coordinated Universal - Time (UTC) at which the custom voice model was last modified. Equals `created` - when a new voice model is first added but has yet to be updated. The value is - provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). - :param str description: (optional) The description of the custom voice model. - :param list[Word] words: (optional) An array of `Word` objects that lists the - words and their translations from the custom voice model. The words are listed in - alphabetical order, with uppercase letters listed before lowercase letters. The - array is empty if the custom model contains no words. This field is returned only - by the **Get a voice** method and only when you specify the customization ID of a - custom voice model. 
- """ - self.customization_id = customization_id - self.name = name - self.language = language - self.owner = owner - self.created = created - self.last_modified = last_modified - self.description = description - self.words = words - - @classmethod - def _from_dict(cls, _dict): - """Initialize a VoiceModel object from a json dictionary.""" - args = {} - if 'customization_id' in _dict: - args['customization_id'] = _dict.get('customization_id') - else: - raise ValueError( - 'Required property \'customization_id\' not present in VoiceModel JSON' - ) - if 'name' in _dict: - args['name'] = _dict.get('name') - if 'language' in _dict: - args['language'] = _dict.get('language') - if 'owner' in _dict: - args['owner'] = _dict.get('owner') - if 'created' in _dict: - args['created'] = _dict.get('created') - if 'last_modified' in _dict: - args['last_modified'] = _dict.get('last_modified') - if 'description' in _dict: - args['description'] = _dict.get('description') - if 'words' in _dict: - args['words'] = [Word._from_dict(x) for x in (_dict.get('words'))] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'customization_id') and self.customization_id is not None: - _dict['customization_id'] = self.customization_id - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = self.language - if hasattr(self, 'owner') and self.owner is not None: - _dict['owner'] = self.owner - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = self.created - if hasattr(self, 'last_modified') and self.last_modified is not None: - _dict['last_modified'] = self.last_modified - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'words') and self.words is not None: - _dict['words'] = [x._to_dict() for x in self.words] + if isinstance(self.customization, dict): + _dict['customization'] = self.customization + else: + _dict['customization'] = self.customization.to_dict() return _dict - def __str__(self): - """Return a `str` version of this VoiceModel object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class VoiceModels(object): - """ - VoiceModels. - - :attr list[VoiceModel] customizations: An array of `VoiceModel` objects that provides - information about each available custom voice model. The array is empty if the - requesting service credentials own no custom voice models (if no language is - specified) or own no custom voice models for the specified language. - """ - - def __init__(self, customizations): - """ - Initialize a VoiceModels object. - - :param list[VoiceModel] customizations: An array of `VoiceModel` objects that - provides information about each available custom voice model. The array is empty - if the requesting service credentials own no custom voice models (if no language - is specified) or own no custom voice models for the specified language. 
- """ - self.customizations = customizations - - @classmethod - def _from_dict(cls, _dict): - """Initialize a VoiceModels object from a json dictionary.""" - args = {} - if 'customizations' in _dict: - args['customizations'] = [ - VoiceModel._from_dict(x) for x in (_dict.get('customizations')) - ] - else: - raise ValueError( - 'Required property \'customizations\' not present in VoiceModels JSON' - ) - return cls(**args) - def _to_dict(self): """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'customizations') and self.customizations is not None: - _dict['customizations'] = [ - x._to_dict() for x in self.customizations - ] - return _dict + return self.to_dict() - def __str__(self): - """Return a `str` version of this VoiceModels object.""" - return json.dumps(self._to_dict(), indent=2) + def __str__(self) -> str: + """Return a `str` version of this Voice object.""" + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Voice') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Voice') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Voices(object): +class Voices: """ - Voices. + Information about all available voices. - :attr list[Voice] voices: A list of available voices. + :param List[Voice] voices: A list of available voices. """ - def __init__(self, voices): + def __init__( + self, + voices: List['Voice'], + ) -> None: """ Initialize a Voices object. - :param list[Voice] voices: A list of available voices. + :param List[Voice] voices: A list of available voices. 
""" self.voices = voices @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'Voices': """Initialize a Voices object from a json dictionary.""" args = {} - if 'voices' in _dict: - args['voices'] = [ - Voice._from_dict(x) for x in (_dict.get('voices')) - ] + if (voices := _dict.get('voices')) is not None: + args['voices'] = [Voice.from_dict(v) for v in voices] else: raise ValueError( 'Required property \'voices\' not present in Voices JSON') return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Voices object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'voices') and self.voices is not None: - _dict['voices'] = [x._to_dict() for x in self.voices] + voices_list = [] + for v in self.voices: + if isinstance(v, dict): + voices_list.append(v) + else: + voices_list.append(v.to_dict()) + _dict['voices'] = voices_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this Voices object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Voices') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Voices') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other -class Word(object): +class Word: """ - Word. - - :attr str word: A word from the custom voice model. - :attr str translation: The phonetic or sounds-like translation for the word. A - phonetic translation is based on the SSML format for representing the phonetic string - of a word either as an IPA or IBM SPR translation. A sounds-like translation consists - of one or more words that, when combined, sound like the word. - :attr str part_of_speech: (optional) **Japanese only.** The part of speech for the - word. The service uses the value to produce the correct intonation for the word. You - can create only a single entry, with or without a single part of speech, for any word; - you cannot create multiple entries with different parts of speech for the same word. - For more information, see [Working with Japanese - entries](https://cloud.ibm.com/docs/services/text-to-speech/custom-rules.html#jaNotes). + Information about a word for the custom model. + + :param str word: The word for the custom model. The maximum length of a word is + 49 characters. + :param str translation: The phonetic or sounds-like translation for the word. A + phonetic translation is based on the SSML format for representing the phonetic + string of a word either as an IPA or IBM SPR translation. A sounds-like + translation consists of one or more words that, when combined, sound like the + word. The maximum length of a translation is 499 characters. + :param str part_of_speech: (optional) **Japanese only.** The part of speech for + the word. The service uses the value to produce the correct intonation for the + word. You can create only a single entry, with or without a single part of + speech, for any word; you cannot create multiple entries with different parts of + speech for the same word. 
For more information, see [Working with Japanese + entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes). """ - def __init__(self, word, translation, part_of_speech=None): + def __init__( + self, + word: str, + translation: str, + *, + part_of_speech: Optional[str] = None, + ) -> None: """ Initialize a Word object. - :param str word: A word from the custom voice model. - :param str translation: The phonetic or sounds-like translation for the word. A - phonetic translation is based on the SSML format for representing the phonetic - string of a word either as an IPA or IBM SPR translation. A sounds-like - translation consists of one or more words that, when combined, sound like the - word. - :param str part_of_speech: (optional) **Japanese only.** The part of speech for - the word. The service uses the value to produce the correct intonation for the - word. You can create only a single entry, with or without a single part of speech, - for any word; you cannot create multiple entries with different parts of speech - for the same word. For more information, see [Working with Japanese - entries](https://cloud.ibm.com/docs/services/text-to-speech/custom-rules.html#jaNotes). + :param str word: The word for the custom model. The maximum length of a + word is 49 characters. + :param str translation: The phonetic or sounds-like translation for the + word. A phonetic translation is based on the SSML format for representing + the phonetic string of a word either as an IPA or IBM SPR translation. A + sounds-like translation consists of one or more words that, when combined, + sound like the word. The maximum length of a translation is 499 characters. + :param str part_of_speech: (optional) **Japanese only.** The part of speech + for the word. The service uses the value to produce the correct intonation + for the word. You can create only a single entry, with or without a single + part of speech, for any word; you cannot create multiple entries with + different parts of speech for the same word. For more information, see + [Working with Japanese + entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes). 
""" self.word = word self.translation = translation self.part_of_speech = part_of_speech @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'Word': """Initialize a Word object from a json dictionary.""" args = {} - if 'word' in _dict: - args['word'] = _dict.get('word') + if (word := _dict.get('word')) is not None: + args['word'] = word else: raise ValueError( 'Required property \'word\' not present in Word JSON') - if 'translation' in _dict: - args['translation'] = _dict.get('translation') + if (translation := _dict.get('translation')) is not None: + args['translation'] = translation else: raise ValueError( 'Required property \'translation\' not present in Word JSON') - if 'part_of_speech' in _dict: - args['part_of_speech'] = _dict.get('part_of_speech') + if (part_of_speech := _dict.get('part_of_speech')) is not None: + args['part_of_speech'] = part_of_speech return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Word object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'word') and self.word is not None: @@ -1541,76 +3640,130 @@ def _to_dict(self): _dict['part_of_speech'] = self.part_of_speech return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this Word object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Word') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Word') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other + class PartOfSpeechEnum(str, Enum): + """ + **Japanese only.** The part of speech for the word. The service uses the value to + produce the correct intonation for the word. You can create only a single entry, + with or without a single part of speech, for any word; you cannot create multiple + entries with different parts of speech for the same word. For more information, + see [Working with Japanese + entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes). + """ -class Words(object): + DOSI = 'Dosi' + FUKU = 'Fuku' + GOBI = 'Gobi' + HOKA = 'Hoka' + JODO = 'Jodo' + JOSI = 'Josi' + KATO = 'Kato' + KEDO = 'Kedo' + KEYO = 'Keyo' + KIGO = 'Kigo' + KOYU = 'Koyu' + MESI = 'Mesi' + RETA = 'Reta' + STBI = 'Stbi' + STTO = 'Stto' + STZO = 'Stzo' + SUJI = 'Suji' + + +class Words: """ - Words. - - :attr list[Word] words: The **Add custom words** method accepts an array of `Word` - objects. Each object provides a word that is to be added or updated for the custom - voice model and the word's translation. - The **List custom words** method returns an array of `Word` objects. Each object shows - a word and its translation from the custom voice model. The words are listed in - alphabetical order, with uppercase letters listed before lowercase letters. The array - is empty if the custom model contains no words. + For the [Add custom words](#addwords) method, one or more words that are to be added + or updated for the custom model and the translation for each specified word. 
+ For the [List custom words](#listwords) method, the words and their translations from + the custom model. + + :param List[Word] words: The [Add custom words](#addwords) method accepts an + array of `Word` objects. Each object provides a word that is to be added or + updated for the custom model and the word's translation. + The [List custom words](#listwords) method returns an array of `Word` objects. + Each object shows a word and its translation from the custom model. The words + are listed in alphabetical order, with uppercase letters listed before lowercase + letters. The array is empty if the custom model contains no words. """ - def __init__(self, words): + def __init__( + self, + words: List['Word'], + ) -> None: """ Initialize a Words object. - :param list[Word] words: The **Add custom words** method accepts an array of - `Word` objects. Each object provides a word that is to be added or updated for the - custom voice model and the word's translation. - The **List custom words** method returns an array of `Word` objects. Each object - shows a word and its translation from the custom voice model. The words are listed - in alphabetical order, with uppercase letters listed before lowercase letters. The - array is empty if the custom model contains no words. + :param List[Word] words: The [Add custom words](#addwords) method accepts + an array of `Word` objects. Each object provides a word that is to be added + or updated for the custom model and the word's translation. + The [List custom words](#listwords) method returns an array of `Word` + objects. Each object shows a word and its translation from the custom + model. The words are listed in alphabetical order, with uppercase letters + listed before lowercase letters. The array is empty if the custom model + contains no words. 
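# A small, illustrative sketch of the shapes these new Word and Words models describe, using
# only fields shown here; the example words, translations, and the module path in the import
# are placeholders/assumptions rather than real service data.
from ibm_watson.text_to_speech_v1 import Word, Words

words = Words(words=[
    Word(word='gnocchi', translation='nyohkee'),     # sounds-like translation
    Word(word='IEEE', translation='I triple E'),     # another sounds-like entry
    # For Japanese custom models only, a part of speech from Word.PartOfSpeechEnum may be
    # supplied, e.g. Word(word='...', translation='...', part_of_speech='Mesi').
])
payload = words.to_dict()   # {'words': [{'word': 'gnocchi', 'translation': 'nyohkee'}, ...]}
assert Words.from_dict(payload) == words   # from_dict/to_dict round-trip as defined above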
""" self.words = words @classmethod - def _from_dict(cls, _dict): + def from_dict(cls, _dict: Dict) -> 'Words': """Initialize a Words object from a json dictionary.""" args = {} - if 'words' in _dict: - args['words'] = [Word._from_dict(x) for x in (_dict.get('words'))] + if (words := _dict.get('words')) is not None: + args['words'] = [Word.from_dict(v) for v in words] else: raise ValueError( 'Required property \'words\' not present in Words JSON') return cls(**args) - def _to_dict(self): + @classmethod + def _from_dict(cls, _dict): + """Initialize a Words object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'words') and self.words is not None: - _dict['words'] = [x._to_dict() for x in self.words] + words_list = [] + for v in self.words: + if isinstance(v, dict): + words_list.append(v) + else: + words_list.append(v.to_dict()) + _dict['words'] = words_list return _dict - def __str__(self): + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: """Return a `str` version of this Words object.""" - return json.dumps(self._to_dict(), indent=2) + return json.dumps(self.to_dict(), indent=2) - def __eq__(self, other): + def __eq__(self, other: 'Words') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other: 'Words') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other diff --git a/ibm_watson/tone_analyzer_v3.py b/ibm_watson/tone_analyzer_v3.py deleted file mode 100644 index 4711c7680..000000000 --- a/ibm_watson/tone_analyzer_v3.py +++ /dev/null @@ -1,1087 +0,0 @@ -# coding: utf-8 - -# Copyright 2018 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -The IBM Watson™ Tone Analyzer service uses linguistic analysis to detect emotional -and language tones in written text. The service can analyze tone at both the document and -sentence levels. You can use the service to understand how your written communications are -perceived and then to improve the tone of your communications. Businesses can use the -service to learn the tone of their customers' communications and to respond to each -customer appropriately, or to understand and improve their customer conversations. -**Note:** Request logging is disabled for the Tone Analyzer service. Regardless of whether -you set the `X-Watson-Learning-Opt-Out` request header, the service does not log or retain -data from requests and responses. 
-""" - -from __future__ import absolute_import - -import json -from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService - -############################################################################## -# Service -############################################################################## - - -class ToneAnalyzerV3(BaseService): - """The Tone Analyzer V3 service.""" - - default_url = 'https://gateway.watsonplatform.net/tone-analyzer/api' - - def __init__( - self, - version, - url=default_url, - username=None, - password=None, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): - """ - Construct a new client for the Tone Analyzer service. - - :param str version: The API version date to use with the service, in - "YYYY-MM-DD" format. Whenever the API is changed in a backwards - incompatible way, a new minor version of the API is released. - The service uses the API version for the date you specify, or - the most recent version before that date. Note that you should - not programmatically specify the current date at runtime, in - case the API has been updated since your application's release. - Instead, specify a version date that is compatible with your - application, and don't change it until your application is - ready for a later version. - - :param str url: The base url to use when contacting the service (e.g. - "https://gateway.watsonplatform.net/tone-analyzer/api/tone-analyzer/api"). - The base url may differ between IBM Cloud regions. - - :param str username: The username used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str password: The password used to authenticate with the service. - Username and password credentials are only required to run your - application locally or outside of IBM Cloud. When running on - IBM Cloud, the credentials will be automatically loaded from the - `VCAP_SERVICES` environment variable. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. - - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. Defaults to - 'https://iam.cloud.ibm.com/identity/token'. - """ - - BaseService.__init__( - self, - vcap_services_name='tone_analyzer', - url=url, - username=username, - password=password, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Tone Analyzer') - self.version = version - - ######################### - # Methods - ######################### - - def tone(self, - tone_input, - sentences=None, - tones=None, - content_language=None, - accept_language=None, - content_type=None, - **kwargs): - """ - Analyze general tone. - - Use the general purpose endpoint to analyze the tone of your input content. The - service analyzes the content for emotional and language tones. The method always - analyzes the tone of the full document; by default, it also analyzes the tone of - each individual sentence of the content. 
- You can submit no more than 128 KB of total input content and no more than 1000 - individual sentences in JSON, plain text, or HTML format. The service analyzes the - first 1000 sentences for document-level analysis and only the first 100 sentences - for sentence-level analysis. - Per the JSON specification, the default character encoding for JSON content is - effectively always UTF-8; per the HTTP specification, the default encoding for - plain text and HTML is ISO-8859-1 (effectively, the ASCII character set). When - specifying a content type of plain text or HTML, include the `charset` parameter - to indicate the character encoding of the input text; for example: `Content-Type: - text/plain;charset=utf-8`. For `text/html`, the service removes HTML tags and - analyzes only the textual content. - **See also:** [Using the general-purpose - endpoint](https://cloud.ibm.com/docs/services/tone-analyzer/using-tone.html#using-the-general-purpose-endpoint). - - :param ToneInput tone_input: JSON, plain text, or HTML input that contains the - content to be analyzed. For JSON input, provide an object of type `ToneInput`. - :param bool sentences: Indicates whether the service is to return an analysis of - each individual sentence in addition to its analysis of the full document. If - `true` (the default), the service returns results for each sentence. - :param list[str] tones: **`2017-09-21`:** Deprecated. The service continues to - accept the parameter for backward-compatibility, but the parameter no longer - affects the response. - **`2016-05-19`:** A comma-separated list of tones for which the service is to - return its analysis of the input; the indicated tones apply both to the full - document and to individual sentences of the document. You can specify one or more - of the valid values. Omit the parameter to request results for all three tones. - :param str content_language: The language of the input text for the request: - English or French. Regional variants are treated as their parent language; for - example, `en-US` is interpreted as `en`. The input content must match the - specified language. Do not submit content that contains both languages. You can - use different languages for **Content-Language** and **Accept-Language**. - * **`2017-09-21`:** Accepts `en` or `fr`. - * **`2016-05-19`:** Accepts only `en`. - :param str accept_language: The desired language of the response. For - two-character arguments, regional variants are treated as their parent language; - for example, `en-US` is interpreted as `en`. You can use different languages for - **Content-Language** and **Accept-Language**. - :param str content_type: The type of the input. A character encoding can be - specified by including a `charset` parameter. For example, - 'text/plain;charset=utf-8'. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if tone_input is None: - raise ValueError('tone_input must be provided') - if isinstance(tone_input, ToneInput): - tone_input = self._convert_model(tone_input, ToneInput) - - headers = { - 'Content-Language': content_language, - 'Accept-Language': accept_language, - 'Content-Type': content_type - } - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('tone_analyzer', 'V3', 'tone') - headers.update(sdk_headers) - - params = { - 'version': self.version, - 'sentences': sentences, - 'tones': self._convert_list(tones) - } - - if content_type == 'application/json' and isinstance(tone_input, dict): - data = json.dumps(tone_input) - else: - data = tone_input - - url = '/v3/tone' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - data=data, - accept_json=True) - return response - - def tone_chat(self, - utterances, - content_language=None, - accept_language=None, - **kwargs): - """ - Analyze customer engagement tone. - - Use the customer engagement endpoint to analyze the tone of customer service and - customer support conversations. For each utterance of a conversation, the method - reports the most prevalent subset of the following seven tones: sad, frustrated, - satisfied, excited, polite, impolite, and sympathetic. - If you submit more than 50 utterances, the service returns a warning for the - overall content and analyzes only the first 50 utterances. If you submit a single - utterance that contains more than 500 characters, the service returns an error for - that utterance and does not analyze the utterance. The request fails if all - utterances have more than 500 characters. Per the JSON specification, the default - character encoding for JSON content is effectively always UTF-8. - **See also:** [Using the customer-engagement - endpoint](https://cloud.ibm.com/docs/services/tone-analyzer/using-tone-chat.html#using-the-customer-engagement-endpoint). - - :param list[Utterance] utterances: An array of `Utterance` objects that provides - the input content that the service is to analyze. - :param str content_language: The language of the input text for the request: - English or French. Regional variants are treated as their parent language; for - example, `en-US` is interpreted as `en`. The input content must match the - specified language. Do not submit content that contains both languages. You can - use different languages for **Content-Language** and **Accept-Language**. - * **`2017-09-21`:** Accepts `en` or `fr`. - * **`2016-05-19`:** Accepts only `en`. - :param str accept_language: The desired language of the response. For - two-character arguments, regional variants are treated as their parent language; - for example, `en-US` is interpreted as `en`. You can use different languages for - **Content-Language** and **Accept-Language**. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
- :rtype: DetailedResponse - """ - - if utterances is None: - raise ValueError('utterances must be provided') - utterances = [self._convert_model(x, Utterance) for x in utterances] - - headers = { - 'Content-Language': content_language, - 'Accept-Language': accept_language - } - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('tone_analyzer', 'V3', 'tone_chat') - headers.update(sdk_headers) - - params = {'version': self.version} - - data = {'utterances': utterances} - - url = '/v3/tone_chat' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - json=data, - accept_json=True) - return response - - -############################################################################## -# Models -############################################################################## - - -class DocumentAnalysis(object): - """ - An object of type `DocumentAnalysis` that provides the results of the analysis for the - full input document. - - :attr list[ToneScore] tones: (optional) **`2017-09-21`:** An array of `ToneScore` - objects that provides the results of the analysis for each qualifying tone of the - document. The array includes results for any tone whose score is at least 0.5. The - array is empty if no tone has a score that meets this threshold. **`2016-05-19`:** Not - returned. - :attr list[ToneCategory] tone_categories: (optional) **`2017-09-21`:** Not returned. - **`2016-05-19`:** An array of `ToneCategory` objects that provides the results of the - tone analysis for the full document of the input content. The service returns results - only for the tones specified with the `tones` parameter of the request. - :attr str warning: (optional) **`2017-09-21`:** A warning message if the overall - content exceeds 128 KB or contains more than 1000 sentences. The service analyzes only - the first 1000 sentences for document-level analysis and the first 100 sentences for - sentence-level analysis. **`2016-05-19`:** Not returned. - """ - - def __init__(self, tones=None, tone_categories=None, warning=None): - """ - Initialize a DocumentAnalysis object. - - :param list[ToneScore] tones: (optional) **`2017-09-21`:** An array of `ToneScore` - objects that provides the results of the analysis for each qualifying tone of the - document. The array includes results for any tone whose score is at least 0.5. The - array is empty if no tone has a score that meets this threshold. **`2016-05-19`:** - Not returned. - :param list[ToneCategory] tone_categories: (optional) **`2017-09-21`:** Not - returned. **`2016-05-19`:** An array of `ToneCategory` objects that provides the - results of the tone analysis for the full document of the input content. The - service returns results only for the tones specified with the `tones` parameter of - the request. - :param str warning: (optional) **`2017-09-21`:** A warning message if the overall - content exceeds 128 KB or contains more than 1000 sentences. The service analyzes - only the first 1000 sentences for document-level analysis and the first 100 - sentences for sentence-level analysis. **`2016-05-19`:** Not returned. 
- """ - self.tones = tones - self.tone_categories = tone_categories - self.warning = warning - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DocumentAnalysis object from a json dictionary.""" - args = {} - if 'tones' in _dict: - args['tones'] = [ - ToneScore._from_dict(x) for x in (_dict.get('tones')) - ] - if 'tone_categories' in _dict: - args['tone_categories'] = [ - ToneCategory._from_dict(x) - for x in (_dict.get('tone_categories')) - ] - if 'warning' in _dict: - args['warning'] = _dict.get('warning') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'tones') and self.tones is not None: - _dict['tones'] = [x._to_dict() for x in self.tones] - if hasattr(self, - 'tone_categories') and self.tone_categories is not None: - _dict['tone_categories'] = [ - x._to_dict() for x in self.tone_categories - ] - if hasattr(self, 'warning') and self.warning is not None: - _dict['warning'] = self.warning - return _dict - - def __str__(self): - """Return a `str` version of this DocumentAnalysis object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class SentenceAnalysis(object): - """ - SentenceAnalysis. - - :attr int sentence_id: The unique identifier of a sentence of the input content. The - first sentence has ID 0, and the ID of each subsequent sentence is incremented by one. - :attr str text: The text of the input sentence. - :attr list[ToneScore] tones: (optional) **`2017-09-21`:** An array of `ToneScore` - objects that provides the results of the analysis for each qualifying tone of the - sentence. The array includes results for any tone whose score is at least 0.5. The - array is empty if no tone has a score that meets this threshold. **`2016-05-19`:** Not - returned. - :attr list[ToneCategory] tone_categories: (optional) **`2017-09-21`:** Not returned. - **`2016-05-19`:** An array of `ToneCategory` objects that provides the results of the - tone analysis for the sentence. The service returns results only for the tones - specified with the `tones` parameter of the request. - :attr int input_from: (optional) **`2017-09-21`:** Not returned. **`2016-05-19`:** The - offset of the first character of the sentence in the overall input content. - :attr int input_to: (optional) **`2017-09-21`:** Not returned. **`2016-05-19`:** The - offset of the last character of the sentence in the overall input content. - """ - - def __init__(self, - sentence_id, - text, - tones=None, - tone_categories=None, - input_from=None, - input_to=None): - """ - Initialize a SentenceAnalysis object. - - :param int sentence_id: The unique identifier of a sentence of the input content. - The first sentence has ID 0, and the ID of each subsequent sentence is incremented - by one. - :param str text: The text of the input sentence. - :param list[ToneScore] tones: (optional) **`2017-09-21`:** An array of `ToneScore` - objects that provides the results of the analysis for each qualifying tone of the - sentence. The array includes results for any tone whose score is at least 0.5. The - array is empty if no tone has a score that meets this threshold. **`2016-05-19`:** - Not returned. 
- :param list[ToneCategory] tone_categories: (optional) **`2017-09-21`:** Not - returned. **`2016-05-19`:** An array of `ToneCategory` objects that provides the - results of the tone analysis for the sentence. The service returns results only - for the tones specified with the `tones` parameter of the request. - :param int input_from: (optional) **`2017-09-21`:** Not returned. - **`2016-05-19`:** The offset of the first character of the sentence in the overall - input content. - :param int input_to: (optional) **`2017-09-21`:** Not returned. **`2016-05-19`:** - The offset of the last character of the sentence in the overall input content. - """ - self.sentence_id = sentence_id - self.text = text - self.tones = tones - self.tone_categories = tone_categories - self.input_from = input_from - self.input_to = input_to - - @classmethod - def _from_dict(cls, _dict): - """Initialize a SentenceAnalysis object from a json dictionary.""" - args = {} - if 'sentence_id' in _dict: - args['sentence_id'] = _dict.get('sentence_id') - else: - raise ValueError( - 'Required property \'sentence_id\' not present in SentenceAnalysis JSON' - ) - if 'text' in _dict: - args['text'] = _dict.get('text') - else: - raise ValueError( - 'Required property \'text\' not present in SentenceAnalysis JSON' - ) - if 'tones' in _dict: - args['tones'] = [ - ToneScore._from_dict(x) for x in (_dict.get('tones')) - ] - if 'tone_categories' in _dict: - args['tone_categories'] = [ - ToneCategory._from_dict(x) - for x in (_dict.get('tone_categories')) - ] - if 'input_from' in _dict: - args['input_from'] = _dict.get('input_from') - if 'input_to' in _dict: - args['input_to'] = _dict.get('input_to') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'sentence_id') and self.sentence_id is not None: - _dict['sentence_id'] = self.sentence_id - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'tones') and self.tones is not None: - _dict['tones'] = [x._to_dict() for x in self.tones] - if hasattr(self, - 'tone_categories') and self.tone_categories is not None: - _dict['tone_categories'] = [ - x._to_dict() for x in self.tone_categories - ] - if hasattr(self, 'input_from') and self.input_from is not None: - _dict['input_from'] = self.input_from - if hasattr(self, 'input_to') and self.input_to is not None: - _dict['input_to'] = self.input_to - return _dict - - def __str__(self): - """Return a `str` version of this SentenceAnalysis object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ToneAnalysis(object): - """ - ToneAnalysis. - - :attr DocumentAnalysis document_tone: An object of type `DocumentAnalysis` that - provides the results of the analysis for the full input document. - :attr list[SentenceAnalysis] sentences_tone: (optional) An array of `SentenceAnalysis` - objects that provides the results of the analysis for the individual sentences of the - input content. The service returns results only for the first 100 sentences of the - input. The field is omitted if the `sentences` parameter of the request is set to - `false`. 
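# An illustrative sketch of the response shape that ToneAnalysis and the related model classes
# in this removed file describe; the payload below is a hand-written placeholder, not captured
# service output.
from ibm_watson.tone_analyzer_v3 import ToneAnalysis

analysis = ToneAnalysis._from_dict({
    'document_tone': {
        'tones': [
            {'score': 0.88, 'tone_id': 'joy', 'tone_name': 'Joy'},
            {'score': 0.62, 'tone_id': 'analytical', 'tone_name': 'Analytical'},
        ]
    },
    'sentences_tone': [{
        'sentence_id': 0,
        'text': 'Team, I know that times are tough!',
        'tones': [{'score': 0.80, 'tone_id': 'confident', 'tone_name': 'Confident'}],
    }],
})
for tone_score in analysis.document_tone.tones:
    print(tone_score.tone_id, tone_score.score)   # e.g. "joy 0.88"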
- """ - - def __init__(self, document_tone, sentences_tone=None): - """ - Initialize a ToneAnalysis object. - - :param DocumentAnalysis document_tone: An object of type `DocumentAnalysis` that - provides the results of the analysis for the full input document. - :param list[SentenceAnalysis] sentences_tone: (optional) An array of - `SentenceAnalysis` objects that provides the results of the analysis for the - individual sentences of the input content. The service returns results only for - the first 100 sentences of the input. The field is omitted if the `sentences` - parameter of the request is set to `false`. - """ - self.document_tone = document_tone - self.sentences_tone = sentences_tone - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ToneAnalysis object from a json dictionary.""" - args = {} - if 'document_tone' in _dict: - args['document_tone'] = DocumentAnalysis._from_dict( - _dict.get('document_tone')) - else: - raise ValueError( - 'Required property \'document_tone\' not present in ToneAnalysis JSON' - ) - if 'sentences_tone' in _dict: - args['sentences_tone'] = [ - SentenceAnalysis._from_dict(x) - for x in (_dict.get('sentences_tone')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'document_tone') and self.document_tone is not None: - _dict['document_tone'] = self.document_tone._to_dict() - if hasattr(self, 'sentences_tone') and self.sentences_tone is not None: - _dict['sentences_tone'] = [ - x._to_dict() for x in self.sentences_tone - ] - return _dict - - def __str__(self): - """Return a `str` version of this ToneAnalysis object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ToneCategory(object): - """ - ToneCategory. - - :attr list[ToneScore] tones: An array of `ToneScore` objects that provides the results - for the tones of the category. - :attr str category_id: The unique, non-localized identifier of the category for the - results. The service can return results for the following category IDs: - `emotion_tone`, `language_tone`, and `social_tone`. - :attr str category_name: The user-visible, localized name of the category. - """ - - def __init__(self, tones, category_id, category_name): - """ - Initialize a ToneCategory object. - - :param list[ToneScore] tones: An array of `ToneScore` objects that provides the - results for the tones of the category. - :param str category_id: The unique, non-localized identifier of the category for - the results. The service can return results for the following category IDs: - `emotion_tone`, `language_tone`, and `social_tone`. - :param str category_name: The user-visible, localized name of the category. 
- """ - self.tones = tones - self.category_id = category_id - self.category_name = category_name - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ToneCategory object from a json dictionary.""" - args = {} - if 'tones' in _dict: - args['tones'] = [ - ToneScore._from_dict(x) for x in (_dict.get('tones')) - ] - else: - raise ValueError( - 'Required property \'tones\' not present in ToneCategory JSON') - if 'category_id' in _dict: - args['category_id'] = _dict.get('category_id') - else: - raise ValueError( - 'Required property \'category_id\' not present in ToneCategory JSON' - ) - if 'category_name' in _dict: - args['category_name'] = _dict.get('category_name') - else: - raise ValueError( - 'Required property \'category_name\' not present in ToneCategory JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'tones') and self.tones is not None: - _dict['tones'] = [x._to_dict() for x in self.tones] - if hasattr(self, 'category_id') and self.category_id is not None: - _dict['category_id'] = self.category_id - if hasattr(self, 'category_name') and self.category_name is not None: - _dict['category_name'] = self.category_name - return _dict - - def __str__(self): - """Return a `str` version of this ToneCategory object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ToneChatScore(object): - """ - ToneChatScore. - - :attr float score: The score for the tone in the range of 0.5 to 1. A score greater - than 0.75 indicates a high likelihood that the tone is perceived in the utterance. - :attr str tone_id: The unique, non-localized identifier of the tone for the results. - The service returns results only for tones whose scores meet a minimum threshold of - 0.5. - :attr str tone_name: The user-visible, localized name of the tone. - """ - - def __init__(self, score, tone_id, tone_name): - """ - Initialize a ToneChatScore object. - - :param float score: The score for the tone in the range of 0.5 to 1. A score - greater than 0.75 indicates a high likelihood that the tone is perceived in the - utterance. - :param str tone_id: The unique, non-localized identifier of the tone for the - results. The service returns results only for tones whose scores meet a minimum - threshold of 0.5. - :param str tone_name: The user-visible, localized name of the tone. 
- """ - self.score = score - self.tone_id = tone_id - self.tone_name = tone_name - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ToneChatScore object from a json dictionary.""" - args = {} - if 'score' in _dict: - args['score'] = _dict.get('score') - else: - raise ValueError( - 'Required property \'score\' not present in ToneChatScore JSON') - if 'tone_id' in _dict: - args['tone_id'] = _dict.get('tone_id') - else: - raise ValueError( - 'Required property \'tone_id\' not present in ToneChatScore JSON' - ) - if 'tone_name' in _dict: - args['tone_name'] = _dict.get('tone_name') - else: - raise ValueError( - 'Required property \'tone_name\' not present in ToneChatScore JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'score') and self.score is not None: - _dict['score'] = self.score - if hasattr(self, 'tone_id') and self.tone_id is not None: - _dict['tone_id'] = self.tone_id - if hasattr(self, 'tone_name') and self.tone_name is not None: - _dict['tone_name'] = self.tone_name - return _dict - - def __str__(self): - """Return a `str` version of this ToneChatScore object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ToneInput(object): - """ - ToneInput. - - :attr str text: The input content that the service is to analyze. - """ - - def __init__(self, text): - """ - Initialize a ToneInput object. - - :param str text: The input content that the service is to analyze. - """ - self.text = text - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ToneInput object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - else: - raise ValueError( - 'Required property \'text\' not present in ToneInput JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - return _dict - - def __str__(self): - """Return a `str` version of this ToneInput object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ToneScore(object): - """ - ToneScore. - - :attr float score: The score for the tone. - * **`2017-09-21`:** The score that is returned lies in the range of 0.5 to 1. A score - greater than 0.75 indicates a high likelihood that the tone is perceived in the - content. - * **`2016-05-19`:** The score that is returned lies in the range of 0 to 1. A score - less than 0.5 indicates that the tone is unlikely to be perceived in the content; a - score greater than 0.75 indicates a high likelihood that the tone is perceived. - :attr str tone_id: The unique, non-localized identifier of the tone. 
- * **`2017-09-21`:** The service can return results for the following tone IDs: - `anger`, `fear`, `joy`, and `sadness` (emotional tones); `analytical`, `confident`, - and `tentative` (language tones). The service returns results only for tones whose - scores meet a minimum threshold of 0.5. - * **`2016-05-19`:** The service can return results for the following tone IDs of the - different categories: for the `emotion` category: `anger`, `disgust`, `fear`, `joy`, - and `sadness`; for the `language` category: `analytical`, `confident`, and - `tentative`; for the `social` category: `openness_big5`, `conscientiousness_big5`, - `extraversion_big5`, `agreeableness_big5`, and `emotional_range_big5`. The service - returns scores for all tones of a category, regardless of their values. - :attr str tone_name: The user-visible, localized name of the tone. - """ - - def __init__(self, score, tone_id, tone_name): - """ - Initialize a ToneScore object. - - :param float score: The score for the tone. - * **`2017-09-21`:** The score that is returned lies in the range of 0.5 to 1. A - score greater than 0.75 indicates a high likelihood that the tone is perceived in - the content. - * **`2016-05-19`:** The score that is returned lies in the range of 0 to 1. A - score less than 0.5 indicates that the tone is unlikely to be perceived in the - content; a score greater than 0.75 indicates a high likelihood that the tone is - perceived. - :param str tone_id: The unique, non-localized identifier of the tone. - * **`2017-09-21`:** The service can return results for the following tone IDs: - `anger`, `fear`, `joy`, and `sadness` (emotional tones); `analytical`, - `confident`, and `tentative` (language tones). The service returns results only - for tones whose scores meet a minimum threshold of 0.5. - * **`2016-05-19`:** The service can return results for the following tone IDs of - the different categories: for the `emotion` category: `anger`, `disgust`, `fear`, - `joy`, and `sadness`; for the `language` category: `analytical`, `confident`, and - `tentative`; for the `social` category: `openness_big5`, `conscientiousness_big5`, - `extraversion_big5`, `agreeableness_big5`, and `emotional_range_big5`. The service - returns scores for all tones of a category, regardless of their values. - :param str tone_name: The user-visible, localized name of the tone. 
- """ - self.score = score - self.tone_id = tone_id - self.tone_name = tone_name - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ToneScore object from a json dictionary.""" - args = {} - if 'score' in _dict: - args['score'] = _dict.get('score') - else: - raise ValueError( - 'Required property \'score\' not present in ToneScore JSON') - if 'tone_id' in _dict: - args['tone_id'] = _dict.get('tone_id') - else: - raise ValueError( - 'Required property \'tone_id\' not present in ToneScore JSON') - if 'tone_name' in _dict: - args['tone_name'] = _dict.get('tone_name') - else: - raise ValueError( - 'Required property \'tone_name\' not present in ToneScore JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'score') and self.score is not None: - _dict['score'] = self.score - if hasattr(self, 'tone_id') and self.tone_id is not None: - _dict['tone_id'] = self.tone_id - if hasattr(self, 'tone_name') and self.tone_name is not None: - _dict['tone_name'] = self.tone_name - return _dict - - def __str__(self): - """Return a `str` version of this ToneScore object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Utterance(object): - """ - Utterance. - - :attr str text: An utterance contributed by a user in the conversation that is to be - analyzed. The utterance can contain multiple sentences. - :attr str user: (optional) A string that identifies the user who contributed the - utterance specified by the `text` parameter. - """ - - def __init__(self, text, user=None): - """ - Initialize a Utterance object. - - :param str text: An utterance contributed by a user in the conversation that is to - be analyzed. The utterance can contain multiple sentences. - :param str user: (optional) A string that identifies the user who contributed the - utterance specified by the `text` parameter. - """ - self.text = text - self.user = user - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Utterance object from a json dictionary.""" - args = {} - if 'text' in _dict: - args['text'] = _dict.get('text') - else: - raise ValueError( - 'Required property \'text\' not present in Utterance JSON') - if 'user' in _dict: - args['user'] = _dict.get('user') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'text') and self.text is not None: - _dict['text'] = self.text - if hasattr(self, 'user') and self.user is not None: - _dict['user'] = self.user - return _dict - - def __str__(self): - """Return a `str` version of this Utterance object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class UtteranceAnalyses(object): - """ - UtteranceAnalyses. 
- - :attr list[UtteranceAnalysis] utterances_tone: An array of `UtteranceAnalysis` objects - that provides the results for each utterance of the input. - :attr str warning: (optional) **`2017-09-21`:** A warning message if the content - contains more than 50 utterances. The service analyzes only the first 50 utterances. - **`2016-05-19`:** Not returned. - """ - - def __init__(self, utterances_tone, warning=None): - """ - Initialize a UtteranceAnalyses object. - - :param list[UtteranceAnalysis] utterances_tone: An array of `UtteranceAnalysis` - objects that provides the results for each utterance of the input. - :param str warning: (optional) **`2017-09-21`:** A warning message if the content - contains more than 50 utterances. The service analyzes only the first 50 - utterances. **`2016-05-19`:** Not returned. - """ - self.utterances_tone = utterances_tone - self.warning = warning - - @classmethod - def _from_dict(cls, _dict): - """Initialize a UtteranceAnalyses object from a json dictionary.""" - args = {} - if 'utterances_tone' in _dict: - args['utterances_tone'] = [ - UtteranceAnalysis._from_dict(x) - for x in (_dict.get('utterances_tone')) - ] - else: - raise ValueError( - 'Required property \'utterances_tone\' not present in UtteranceAnalyses JSON' - ) - if 'warning' in _dict: - args['warning'] = _dict.get('warning') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'utterances_tone') and self.utterances_tone is not None: - _dict['utterances_tone'] = [ - x._to_dict() for x in self.utterances_tone - ] - if hasattr(self, 'warning') and self.warning is not None: - _dict['warning'] = self.warning - return _dict - - def __str__(self): - """Return a `str` version of this UtteranceAnalyses object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class UtteranceAnalysis(object): - """ - UtteranceAnalysis. - - :attr int utterance_id: The unique identifier of the utterance. The first utterance - has ID 0, and the ID of each subsequent utterance is incremented by one. - :attr str utterance_text: The text of the utterance. - :attr list[ToneChatScore] tones: An array of `ToneChatScore` objects that provides - results for the most prevalent tones of the utterance. The array includes results for - any tone whose score is at least 0.5. The array is empty if no tone has a score that - meets this threshold. - :attr str error: (optional) **`2017-09-21`:** An error message if the utterance - contains more than 500 characters. The service does not analyze the utterance. - **`2016-05-19`:** Not returned. - """ - - def __init__(self, utterance_id, utterance_text, tones, error=None): - """ - Initialize a UtteranceAnalysis object. - - :param int utterance_id: The unique identifier of the utterance. The first - utterance has ID 0, and the ID of each subsequent utterance is incremented by one. - :param str utterance_text: The text of the utterance. - :param list[ToneChatScore] tones: An array of `ToneChatScore` objects that - provides results for the most prevalent tones of the utterance. The array includes - results for any tone whose score is at least 0.5. 
The array is empty if no tone - has a score that meets this threshold. - :param str error: (optional) **`2017-09-21`:** An error message if the utterance - contains more than 500 characters. The service does not analyze the utterance. - **`2016-05-19`:** Not returned. - """ - self.utterance_id = utterance_id - self.utterance_text = utterance_text - self.tones = tones - self.error = error - - @classmethod - def _from_dict(cls, _dict): - """Initialize a UtteranceAnalysis object from a json dictionary.""" - args = {} - if 'utterance_id' in _dict: - args['utterance_id'] = _dict.get('utterance_id') - else: - raise ValueError( - 'Required property \'utterance_id\' not present in UtteranceAnalysis JSON' - ) - if 'utterance_text' in _dict: - args['utterance_text'] = _dict.get('utterance_text') - else: - raise ValueError( - 'Required property \'utterance_text\' not present in UtteranceAnalysis JSON' - ) - if 'tones' in _dict: - args['tones'] = [ - ToneChatScore._from_dict(x) for x in (_dict.get('tones')) - ] - else: - raise ValueError( - 'Required property \'tones\' not present in UtteranceAnalysis JSON' - ) - if 'error' in _dict: - args['error'] = _dict.get('error') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'utterance_id') and self.utterance_id is not None: - _dict['utterance_id'] = self.utterance_id - if hasattr(self, 'utterance_text') and self.utterance_text is not None: - _dict['utterance_text'] = self.utterance_text - if hasattr(self, 'tones') and self.tones is not None: - _dict['tones'] = [x._to_dict() for x in self.tones] - if hasattr(self, 'error') and self.error is not None: - _dict['error'] = self.error - return _dict - - def __str__(self): - """Return a `str` version of this UtteranceAnalysis object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other diff --git a/ibm_watson/version.py b/ibm_watson/version.py index 9a8a1b10a..00eae9620 100644 --- a/ibm_watson/version.py +++ b/ibm_watson/version.py @@ -1 +1 @@ -__version__ = '3.0.4' +__version__ = '11.2.0' diff --git a/ibm_watson/visual_recognition_v3.py b/ibm_watson/visual_recognition_v3.py deleted file mode 100644 index 05dd2f16c..000000000 --- a/ibm_watson/visual_recognition_v3.py +++ /dev/null @@ -1,1771 +0,0 @@ -# coding: utf-8 - -# Copyright 2018 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -The IBM Watson™ Visual Recognition service uses deep learning algorithms to identify -scenes, objects, and faces in images you upload to the service. You can create and train -a custom classifier to identify subjects that suit your needs. 
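# A brief, illustrative sketch of how this now-removed module was typically used, based on the
# VisualRecognitionV3 constructor and classify() signatures below; the version date, API key,
# and image URL are placeholders, and result handling assumes DetailedResponse.get_result()
# from ibm_cloud_sdk_core.
import json
from ibm_watson.visual_recognition_v3 import VisualRecognitionV3

visual_recognition = VisualRecognitionV3(
    version='2018-03-19',            # API version date placeholder
    iam_apikey='YOUR_IAM_APIKEY',    # placeholder credential
)
response = visual_recognition.classify(
    url='https://example.com/fruitbowl.jpg',   # classify an image by URL instead of uploading a file
    threshold=0.6,                             # omit classes scoring below 0.6
    classifier_ids=['default'],                # the built-in general classifier
)
print(json.dumps(response.get_result(), indent=2))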
-""" - -from __future__ import absolute_import - -import json -from .common import get_sdk_headers -from ibm_cloud_sdk_core import BaseService -from ibm_cloud_sdk_core import datetime_to_string, string_to_datetime -from os.path import basename - -############################################################################## -# Service -############################################################################## - - -class VisualRecognitionV3(BaseService): - """The Visual Recognition V3 service.""" - - default_url = 'https://gateway.watsonplatform.net/visual-recognition/api' - - def __init__( - self, - version, - url=default_url, - iam_apikey=None, - iam_access_token=None, - iam_url=None, - ): - """ - Construct a new client for the Visual Recognition service. - - :param str version: The API version date to use with the service, in - "YYYY-MM-DD" format. Whenever the API is changed in a backwards - incompatible way, a new minor version of the API is released. - The service uses the API version for the date you specify, or - the most recent version before that date. Note that you should - not programmatically specify the current date at runtime, in - case the API has been updated since your application's release. - Instead, specify a version date that is compatible with your - application, and don't change it until your application is - ready for a later version. - - :param str url: The base url to use when contacting the service (e.g. - "https://gateway.watsonplatform.net/visual-recognition/api/visual-recognition/api"). - The base url may differ between IBM Cloud regions. - - :param str iam_apikey: An API key that can be used to request IAM tokens. If - this API key is provided, the SDK will manage the token and handle the - refreshing. - - :param str iam_access_token: An IAM access token is fully managed by the application. - Responsibility falls on the application to refresh the token, either before - it expires or reactively upon receiving a 401 from the service as any requests - made with an expired token will fail. - - :param str iam_url: An optional URL for the IAM service API. Defaults to - 'https://iam.cloud.ibm.com/identity/token'. - """ - - BaseService.__init__( - self, - vcap_services_name='watson_vision_combined', - url=url, - iam_apikey=iam_apikey, - iam_access_token=iam_access_token, - iam_url=iam_url, - use_vcap_services=True, - display_name='Visual Recognition') - self.version = version - - ######################### - # General - ######################### - - def classify(self, - images_file=None, - images_filename=None, - images_file_content_type=None, - url=None, - threshold=None, - owners=None, - classifier_ids=None, - accept_language=None, - **kwargs): - """ - Classify images. - - Classify images with built-in or custom classifiers. - - :param file images_file: An image file (.gif, .jpg, .png, .tif) or .zip file with - images. Maximum image size is 10 MB. Include no more than 20 images and limit the - .zip file to 100 MB. Encode the image and .zip file names in UTF-8 if they contain - non-ASCII characters. The service assumes UTF-8 encoding if it encounters - non-ASCII characters. - You can also include an image with the **url** parameter. - :param str images_filename: The filename for images_file. - :param str images_file_content_type: The content type of images_file. - :param str url: The URL of an image (.gif, .jpg, .png, .tif) to analyze. 
The - minimum recommended pixel density is 32X32 pixels, but the service tends to - perform better with images that are at least 224 x 224 pixels. The maximum image - size is 10 MB. - You can also include images with the **images_file** parameter. - :param float threshold: The minimum score a class must have to be displayed in the - response. Set the threshold to `0.0` to return all identified classes. - :param list[str] owners: The categories of classifiers to apply. The - **classifier_ids** parameter overrides **owners**, so make sure that - **classifier_ids** is empty. - - Use `IBM` to classify against the `default` general classifier. You get the same - result if both **classifier_ids** and **owners** parameters are empty. - - Use `me` to classify against all your custom classifiers. However, for better - performance use **classifier_ids** to specify the specific custom classifiers to - apply. - - Use both `IBM` and `me` to analyze the image against both classifier categories. - :param list[str] classifier_ids: Which classifiers to apply. Overrides the - **owners** parameter. You can specify both custom and built-in classifier IDs. The - built-in `default` classifier is used if both **classifier_ids** and **owners** - parameters are empty. - The following built-in classifier IDs require no training: - - `default`: Returns classes from thousands of general tags. - - `food`: Enhances specificity and accuracy for images of food items. - - `explicit`: Evaluates whether the image might be pornographic. - :param str accept_language: The desired language of parts of the response. See the - response for details. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {'Accept-Language': accept_language} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('watson_vision_combined', 'V3', - 'classify') - headers.update(sdk_headers) - - params = {'version': self.version} - - form_data = {} - if images_file: - if not images_filename and hasattr(images_file, 'name'): - images_filename = basename(images_file.name) - if not images_filename: - raise ValueError('images_filename must be provided') - form_data['images_file'] = (images_filename, images_file, - images_file_content_type or - 'application/octet-stream') - if url: - form_data['url'] = (None, url, 'text/plain') - if threshold: - form_data['threshold'] = (None, threshold, 'application/json') - if owners: - owners = self._convert_list(owners) - form_data['owners'] = (None, owners, 'application/json') - if classifier_ids: - classifier_ids = self._convert_list(classifier_ids) - form_data['classifier_ids'] = (None, classifier_ids, - 'application/json') - - url = '/v3/classify' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - ######################### - # Face - ######################### - - def detect_faces(self, - images_file=None, - images_filename=None, - images_file_content_type=None, - url=None, - accept_language=None, - **kwargs): - """ - Detect faces in images. - - **Important:** On April 2, 2018, the identity information in the response to calls - to the Face model was removed. The identity information refers to the `name` of - the person, `score`, and `type_hierarchy` knowledge graph. 
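To make the removed `classify` signature above concrete, here is a hedged usage sketch. The file path, threshold, and classifier IDs are illustrative placeholders, and `get_result()` assumes the `DetailedResponse` interface from `ibm_cloud_sdk_core`:

```python
import json

# Illustrative only: classify a local image against the built-in
# 'default' classifier using the method deleted in this diff.
with open('./fruitbowl.jpg', 'rb') as images_file:  # placeholder path
    response = visual_recognition.classify(
        images_file=images_file,
        threshold='0.6',             # only return classes scoring at least 0.6
        classifier_ids=['default'])
    classes = response.get_result()  # assumes DetailedResponse.get_result()
    print(json.dumps(classes, indent=2))
```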
For details about the - enhanced Face model, see the [Release - notes](https://cloud.ibm.com/docs/services/visual-recognition/release-notes.html#2april2018). - Analyze and get data about faces in images. Responses can include estimated age - and gender. This feature uses a built-in model, so no training is necessary. The - Detect faces method does not support general biometric facial recognition. - Supported image formats include .gif, .jpg, .png, and .tif. The maximum image size - is 10 MB. The minimum recommended pixel density is 32X32 pixels, but the service - tends to perform better with images that are at least 224 x 224 pixels. - - :param file images_file: An image file (gif, .jpg, .png, .tif.) or .zip file with - images. Limit the .zip file to 100 MB. You can include a maximum of 15 images in a - request. - Encode the image and .zip file names in UTF-8 if they contain non-ASCII - characters. The service assumes UTF-8 encoding if it encounters non-ASCII - characters. - You can also include an image with the **url** parameter. - :param str images_filename: The filename for images_file. - :param str images_file_content_type: The content type of images_file. - :param str url: The URL of an image to analyze. Must be in .gif, .jpg, .png, or - .tif format. The minimum recommended pixel density is 32X32 pixels, but the - service tends to perform better with images that are at least 224 x 224 pixels. - The maximum image size is 10 MB. Redirects are followed, so you can use a - shortened URL. - You can also include images with the **images_file** parameter. - :param str accept_language: The desired language of parts of the response. See the - response for details. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {'Accept-Language': accept_language} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('watson_vision_combined', 'V3', - 'detect_faces') - headers.update(sdk_headers) - - params = {'version': self.version} - - form_data = {} - if images_file: - if not images_filename and hasattr(images_file, 'name'): - images_filename = basename(images_file.name) - if not images_filename: - raise ValueError('images_filename must be provided') - form_data['images_file'] = (images_filename, images_file, - images_file_content_type or - 'application/octet-stream') - if url: - form_data['url'] = (None, url, 'text/plain') - - url = '/v3/detect_faces' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - ######################### - # Custom - ######################### - - def create_classifier(self, - name, - positive_examples, - negative_examples=None, - negative_examples_filename=None, - **kwargs): - """ - Create a classifier. - - Train a new multi-faceted classifier on the uploaded image data. Create your - custom classifier with positive or negative examples. Include at least two sets of - examples, either two positive example files or one positive and one negative file. - You can upload a maximum of 256 MB per call. - Encode all names in UTF-8 if they contain non-ASCII characters (.zip and image - file names, and classifier and class names). The service assumes UTF-8 encoding if - it encounters non-ASCII characters. - - :param str name: The name of the new classifier. Encode special characters in - UTF-8. 
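Likewise, a short sketch of the deleted `detect_faces` call documented above, passing an image by URL (the URL is a placeholder, and the response shape follows the `DetectedFaces` model later in this diff):

```python
# Illustrative only: detect faces in an image referenced by URL.
response = visual_recognition.detect_faces(
    url='https://example.com/team-photo.jpg',  # placeholder URL
    accept_language='en')
faces = response.get_result()                  # assumes DetailedResponse.get_result()
print(faces['images'][0]['faces'])             # first image's detected faces
```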
- :param dict positive_examples: A dictionary that contains the value for each - classname. The value is a .zip file of images that depict the visual subject of a - class in the new classifier. You can include more than one positive example file - in a call. - Specify the parameter name by appending `_positive_examples` to the class name. - For example, `goldenretriever_positive_examples` creates the class - **goldenretriever**. - Include at least 10 images in .jpg or .png format. The minimum recommended image - resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100 - MB per .zip file. - Encode special characters in the file name in UTF-8. - :param file negative_examples: A .zip file of images that do not depict the visual - subject of any of the classes of the new classifier. Must contain a minimum of 10 - images. - Encode special characters in the file name in UTF-8. - :param str negative_examples_filename: The filename for negative_examples. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if name is None: - raise ValueError('name must be provided') - if not positive_examples: - raise ValueError('positive_examples must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('watson_vision_combined', 'V3', - 'create_classifier') - headers.update(sdk_headers) - - params = {'version': self.version} - - form_data = {} - form_data['name'] = (None, name, 'text/plain') - for key in positive_examples.keys(): - part_name = '%s_positive_examples' % (key) - value = positive_examples[key] - if hasattr(value, 'name'): - filename = basename(value.name) - form_data[part_name] = (filename, value, 'application/octet-stream') - if negative_examples: - if not negative_examples_filename and hasattr( - negative_examples, 'name'): - negative_examples_filename = basename(negative_examples.name) - if not negative_examples_filename: - raise ValueError('negative_examples_filename must be provided') - form_data['negative_examples'] = (negative_examples_filename, - negative_examples, - 'application/octet-stream') - - url = '/v3/classifiers' - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - def delete_classifier(self, classifier_id, **kwargs): - """ - Delete a classifier. - - :param str classifier_id: The ID of the classifier. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if classifier_id is None: - raise ValueError('classifier_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('watson_vision_combined', 'V3', - 'delete_classifier') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v3/classifiers/{0}'.format( - *self._encode_path_vars(classifier_id)) - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def get_classifier(self, classifier_id, **kwargs): - """ - Retrieve classifier details. - - Retrieve information about a custom classifier. - - :param str classifier_id: The ID of the classifier. 
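The `positive_examples` convention above — a dict keyed by class name, where each key is serialized as a `<classname>_positive_examples` form part — is the least obvious part of the removed `create_classifier` method. A hedged sketch, with all file names as placeholders:

```python
# Illustrative only: train a custom classifier with two positive classes
# and one negative set. Each dict key becomes '<key>_positive_examples'.
with open('./beagle.zip', 'rb') as beagle, \
     open('./husky.zip', 'rb') as husky, \
     open('./cats.zip', 'rb') as cats:  # placeholder .zip files
    response = visual_recognition.create_classifier(
        name='dogs',
        positive_examples={'beagle': beagle, 'husky': husky},
        negative_examples=cats)
print(response.get_result()['classifier_id'])  # assumes DetailedResponse.get_result()
```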
- :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if classifier_id is None: - raise ValueError('classifier_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('watson_vision_combined', 'V3', - 'get_classifier') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v3/classifiers/{0}'.format( - *self._encode_path_vars(classifier_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def list_classifiers(self, verbose=None, **kwargs): - """ - Retrieve a list of classifiers. - - :param bool verbose: Specify `true` to return details about the classifiers. Omit - this parameter to return a brief list of classifiers. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('watson_vision_combined', 'V3', - 'list_classifiers') - headers.update(sdk_headers) - - params = {'version': self.version, 'verbose': verbose} - - url = '/v3/classifiers' - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - def update_classifier(self, - classifier_id, - positive_examples={}, - negative_examples=None, - negative_examples_filename=None, - **kwargs): - """ - Update a classifier. - - Update a custom classifier by adding new positive or negative classes or by adding - new images to existing classes. You must supply at least one set of positive or - negative examples. For details, see [Updating custom - classifiers](https://cloud.ibm.com/docs/services/visual-recognition/customizing.html#updating-custom-classifiers). - Encode all names in UTF-8 if they contain non-ASCII characters (.zip and image - file names, and classifier and class names). The service assumes UTF-8 encoding if - it encounters non-ASCII characters. - **Tip:** Don't make retraining calls on a classifier until the status is ready. - When you submit retraining requests in parallel, the last request overwrites the - previous requests. The retrained property shows the last time the classifier - retraining finished. - - :param str classifier_id: The ID of the classifier. - :param dict positive_examples: A dictionary that contains the value for each - classname. The value is a .zip file of images that depict the visual subject of a - class in the classifier. The positive examples create or update classes in the - classifier. You can include more than one positive example file in a call. - Specify the parameter name by appending `_positive_examples` to the class name. - For example, `goldenretriever_positive_examples` creates the class - `goldenretriever`. - Include at least 10 images in .jpg or .png format. The minimum recommended image - resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100 - MB per .zip file. - Encode special characters in the file name in UTF-8. - :param file negative_examples: A .zip file of images that do not depict the visual - subject of any of the classes of the new classifier. Must contain a minimum of 10 - images. - Encode special characters in the file name in UTF-8. 
- :param str negative_examples_filename: The filename for negative_examples. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if classifier_id is None: - raise ValueError('classifier_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('watson_vision_combined', 'V3', - 'update_classifier') - headers.update(sdk_headers) - - params = {'version': self.version} - - form_data = {} - for key in positive_examples.keys(): - part_name = '%s_positive_examples' % (key) - value = positive_examples[key] - if hasattr(value, 'name'): - filename = basename(value.name) - form_data[part_name] = (filename, value, 'application/octet-stream') - if negative_examples: - if not negative_examples_filename and hasattr( - negative_examples, 'name'): - negative_examples_filename = basename(negative_examples.name) - if not negative_examples_filename: - raise ValueError('negative_examples_filename must be provided') - form_data['negative_examples'] = (negative_examples_filename, - negative_examples, - 'application/octet-stream') - - url = '/v3/classifiers/{0}'.format( - *self._encode_path_vars(classifier_id)) - response = self.request( - method='POST', - url=url, - headers=headers, - params=params, - files=form_data, - accept_json=True) - return response - - ######################### - # Core ML - ######################### - - def get_core_ml_model(self, classifier_id, **kwargs): - """ - Retrieve a Core ML model of a classifier. - - Download a Core ML model file (.mlmodel) of a custom classifier that returns - \"core_ml_enabled\": true in the classifier details. - - :param str classifier_id: The ID of the classifier. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse - """ - - if classifier_id is None: - raise ValueError('classifier_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('watson_vision_combined', 'V3', - 'get_core_ml_model') - headers.update(sdk_headers) - - params = {'version': self.version} - - url = '/v3/classifiers/{0}/core_ml_model'.format( - *self._encode_path_vars(classifier_id)) - response = self.request( - method='GET', - url=url, - headers=headers, - params=params, - accept_json=False) - return response - - ######################### - # User data - ######################### - - def delete_user_data(self, customer_id, **kwargs): - """ - Delete labeled data. - - Deletes all data associated with a specified customer ID. The method has no effect - if no data is associated with the customer ID. - You associate a customer ID with data by passing the `X-Watson-Metadata` header - with a request that passes data. For more information about personal data and - customer IDs, see [Information - security](https://cloud.ibm.com/docs/services/visual-recognition/information-security.html). - - :param str customer_id: The customer ID for which all data is to be deleted. - :param dict headers: A `dict` containing the request headers - :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
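And a sketch of retrieving a Core ML model through the deleted `get_core_ml_model` method above. Because that request is issued with `accept_json=False`, the result is treated here as a raw HTTP response whose body is written to disk; the classifier ID and output path are placeholders, and the `.content` attribute is an assumption about the underlying response object:

```python
# Illustrative only: download the .mlmodel file for a trained custom classifier.
response = visual_recognition.get_core_ml_model(
    classifier_id='dogs_123456789')            # placeholder classifier ID
with open('./dogs.mlmodel', 'wb') as model_file:
    model_file.write(response.get_result().content)  # assumes raw response with .content bytes
```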
- :rtype: DetailedResponse - """ - - if customer_id is None: - raise ValueError('customer_id must be provided') - - headers = {} - if 'headers' in kwargs: - headers.update(kwargs.get('headers')) - sdk_headers = get_sdk_headers('watson_vision_combined', 'V3', - 'delete_user_data') - headers.update(sdk_headers) - - params = {'version': self.version, 'customer_id': customer_id} - - url = '/v3/user_data' - response = self.request( - method='DELETE', - url=url, - headers=headers, - params=params, - accept_json=True) - return response - - -############################################################################## -# Models -############################################################################## - - -class Class(object): - """ - A category within a classifier. - - :attr str class_name: The name of the class. - """ - - def __init__(self, class_name): - """ - Initialize a Class object. - - :param str class_name: The name of the class. - """ - self.class_name = class_name - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Class object from a json dictionary.""" - args = {} - if 'class' in _dict or 'class_name' in _dict: - args['class_name'] = _dict.get('class') or _dict.get('class_name') - else: - raise ValueError( - 'Required property \'class\' not present in Class JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'class_name') and self.class_name is not None: - _dict['class'] = self.class_name - return _dict - - def __str__(self): - """Return a `str` version of this Class object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ClassResult(object): - """ - Result of a class within a classifier. - - :attr str class_name: Name of the class. - Class names are translated in the language defined by the **Accept-Language** request - header for the build-in classifier IDs (`default`, `food`, and `explicit`). Class - names of custom classifiers are not translated. The response might not be in the - specified language when the requested language is not supported or when there is no - translation for the class name. - :attr float score: Confidence score for the property in the range of 0 to 1. A higher - score indicates greater likelihood that the class is depicted in the image. The - default threshold for returning scores from a classifier is 0.5. - :attr str type_hierarchy: (optional) Knowledge graph of the property. For example, - `/fruit/pome/apple/eating apple/Granny Smith`. Included only if identified. - """ - - def __init__(self, class_name, score, type_hierarchy=None): - """ - Initialize a ClassResult object. - - :param str class_name: Name of the class. - Class names are translated in the language defined by the **Accept-Language** - request header for the build-in classifier IDs (`default`, `food`, and - `explicit`). Class names of custom classifiers are not translated. The response - might not be in the specified language when the requested language is not - supported or when there is no translation for the class name. - :param float score: Confidence score for the property in the range of 0 to 1. 
A - higher score indicates greater likelihood that the class is depicted in the image. - The default threshold for returning scores from a classifier is 0.5. - :param str type_hierarchy: (optional) Knowledge graph of the property. For - example, `/fruit/pome/apple/eating apple/Granny Smith`. Included only if - identified. - """ - self.class_name = class_name - self.score = score - self.type_hierarchy = type_hierarchy - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ClassResult object from a json dictionary.""" - args = {} - if 'class' in _dict or 'class_name' in _dict: - args['class_name'] = _dict.get('class') or _dict.get('class_name') - else: - raise ValueError( - 'Required property \'class\' not present in ClassResult JSON') - if 'score' in _dict: - args['score'] = _dict.get('score') - else: - raise ValueError( - 'Required property \'score\' not present in ClassResult JSON') - if 'type_hierarchy' in _dict: - args['type_hierarchy'] = _dict.get('type_hierarchy') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'class_name') and self.class_name is not None: - _dict['class'] = self.class_name - if hasattr(self, 'score') and self.score is not None: - _dict['score'] = self.score - if hasattr(self, 'type_hierarchy') and self.type_hierarchy is not None: - _dict['type_hierarchy'] = self.type_hierarchy - return _dict - - def __str__(self): - """Return a `str` version of this ClassResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ClassifiedImage(object): - """ - Results for one image. - - :attr str source_url: (optional) Source of the image before any redirects. Not - returned when the image is uploaded. - :attr str resolved_url: (optional) Fully resolved URL of the image after redirects are - followed. Not returned when the image is uploaded. - :attr str image: (optional) Relative path of the image file if uploaded directly. Not - returned when the image is passed by URL. - :attr ErrorInfo error: (optional) Information about what might have caused a failure, - such as an image that is too large. Not returned when there is no error. - :attr list[ClassifierResult] classifiers: The classifiers. - """ - - def __init__(self, - classifiers, - source_url=None, - resolved_url=None, - image=None, - error=None): - """ - Initialize a ClassifiedImage object. - - :param list[ClassifierResult] classifiers: The classifiers. - :param str source_url: (optional) Source of the image before any redirects. Not - returned when the image is uploaded. - :param str resolved_url: (optional) Fully resolved URL of the image after - redirects are followed. Not returned when the image is uploaded. - :param str image: (optional) Relative path of the image file if uploaded directly. - Not returned when the image is passed by URL. - :param ErrorInfo error: (optional) Information about what might have caused a - failure, such as an image that is too large. Not returned when there is no error. 
- """ - self.source_url = source_url - self.resolved_url = resolved_url - self.image = image - self.error = error - self.classifiers = classifiers - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ClassifiedImage object from a json dictionary.""" - args = {} - if 'source_url' in _dict: - args['source_url'] = _dict.get('source_url') - if 'resolved_url' in _dict: - args['resolved_url'] = _dict.get('resolved_url') - if 'image' in _dict: - args['image'] = _dict.get('image') - if 'error' in _dict: - args['error'] = ErrorInfo._from_dict(_dict.get('error')) - if 'classifiers' in _dict: - args['classifiers'] = [ - ClassifierResult._from_dict(x) - for x in (_dict.get('classifiers')) - ] - else: - raise ValueError( - 'Required property \'classifiers\' not present in ClassifiedImage JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'source_url') and self.source_url is not None: - _dict['source_url'] = self.source_url - if hasattr(self, 'resolved_url') and self.resolved_url is not None: - _dict['resolved_url'] = self.resolved_url - if hasattr(self, 'image') and self.image is not None: - _dict['image'] = self.image - if hasattr(self, 'error') and self.error is not None: - _dict['error'] = self.error._to_dict() - if hasattr(self, 'classifiers') and self.classifiers is not None: - _dict['classifiers'] = [x._to_dict() for x in self.classifiers] - return _dict - - def __str__(self): - """Return a `str` version of this ClassifiedImage object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ClassifiedImages(object): - """ - Results for all images. - - :attr int custom_classes: (optional) Number of custom classes identified in the - images. - :attr int images_processed: (optional) Number of images processed for the API call. - :attr list[ClassifiedImage] images: Classified images. - :attr list[WarningInfo] warnings: (optional) Information about what might cause less - than optimal output. For example, a request sent with a corrupt .zip file and a list - of image URLs will still complete, but does not return the expected output. Not - returned when there is no warning. - """ - - def __init__(self, - images, - custom_classes=None, - images_processed=None, - warnings=None): - """ - Initialize a ClassifiedImages object. - - :param list[ClassifiedImage] images: Classified images. - :param int custom_classes: (optional) Number of custom classes identified in the - images. - :param int images_processed: (optional) Number of images processed for the API - call. - :param list[WarningInfo] warnings: (optional) Information about what might cause - less than optimal output. For example, a request sent with a corrupt .zip file and - a list of image URLs will still complete, but does not return the expected output. - Not returned when there is no warning. 
- """ - self.custom_classes = custom_classes - self.images_processed = images_processed - self.images = images - self.warnings = warnings - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ClassifiedImages object from a json dictionary.""" - args = {} - if 'custom_classes' in _dict: - args['custom_classes'] = _dict.get('custom_classes') - if 'images_processed' in _dict: - args['images_processed'] = _dict.get('images_processed') - if 'images' in _dict: - args['images'] = [ - ClassifiedImage._from_dict(x) for x in (_dict.get('images')) - ] - else: - raise ValueError( - 'Required property \'images\' not present in ClassifiedImages JSON' - ) - if 'warnings' in _dict: - args['warnings'] = [ - WarningInfo._from_dict(x) for x in (_dict.get('warnings')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'custom_classes') and self.custom_classes is not None: - _dict['custom_classes'] = self.custom_classes - if hasattr(self, - 'images_processed') and self.images_processed is not None: - _dict['images_processed'] = self.images_processed - if hasattr(self, 'images') and self.images is not None: - _dict['images'] = [x._to_dict() for x in self.images] - if hasattr(self, 'warnings') and self.warnings is not None: - _dict['warnings'] = [x._to_dict() for x in self.warnings] - return _dict - - def __str__(self): - """Return a `str` version of this ClassifiedImages object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Classifier(object): - """ - Information about a classifier. - - :attr str classifier_id: ID of a classifier identified in the image. - :attr str name: Name of the classifier. - :attr str owner: (optional) Unique ID of the account who owns the classifier. Might - not be returned by some requests. - :attr str status: (optional) Training status of classifier. - :attr bool core_ml_enabled: (optional) Whether the classifier can be downloaded as a - Core ML model after the training status is `ready`. - :attr str explanation: (optional) If classifier training has failed, this field might - explain why. - :attr datetime created: (optional) Date and time in Coordinated Universal Time (UTC) - that the classifier was created. - :attr list[Class] classes: (optional) Classes that define a classifier. - :attr datetime retrained: (optional) Date and time in Coordinated Universal Time (UTC) - that the classifier was updated. Might not be returned by some requests. Identical to - `updated` and retained for backward compatibility. - :attr datetime updated: (optional) Date and time in Coordinated Universal Time (UTC) - that the classifier was most recently updated. The field matches either `retrained` or - `created`. Might not be returned by some requests. - """ - - def __init__(self, - classifier_id, - name, - owner=None, - status=None, - core_ml_enabled=None, - explanation=None, - created=None, - classes=None, - retrained=None, - updated=None): - """ - Initialize a Classifier object. - - :param str classifier_id: ID of a classifier identified in the image. - :param str name: Name of the classifier. 
- :param str owner: (optional) Unique ID of the account who owns the classifier. - Might not be returned by some requests. - :param str status: (optional) Training status of classifier. - :param bool core_ml_enabled: (optional) Whether the classifier can be downloaded - as a Core ML model after the training status is `ready`. - :param str explanation: (optional) If classifier training has failed, this field - might explain why. - :param datetime created: (optional) Date and time in Coordinated Universal Time - (UTC) that the classifier was created. - :param list[Class] classes: (optional) Classes that define a classifier. - :param datetime retrained: (optional) Date and time in Coordinated Universal Time - (UTC) that the classifier was updated. Might not be returned by some requests. - Identical to `updated` and retained for backward compatibility. - :param datetime updated: (optional) Date and time in Coordinated Universal Time - (UTC) that the classifier was most recently updated. The field matches either - `retrained` or `created`. Might not be returned by some requests. - """ - self.classifier_id = classifier_id - self.name = name - self.owner = owner - self.status = status - self.core_ml_enabled = core_ml_enabled - self.explanation = explanation - self.created = created - self.classes = classes - self.retrained = retrained - self.updated = updated - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Classifier object from a json dictionary.""" - args = {} - if 'classifier_id' in _dict: - args['classifier_id'] = _dict.get('classifier_id') - else: - raise ValueError( - 'Required property \'classifier_id\' not present in Classifier JSON' - ) - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in Classifier JSON') - if 'owner' in _dict: - args['owner'] = _dict.get('owner') - if 'status' in _dict: - args['status'] = _dict.get('status') - if 'core_ml_enabled' in _dict: - args['core_ml_enabled'] = _dict.get('core_ml_enabled') - if 'explanation' in _dict: - args['explanation'] = _dict.get('explanation') - if 'created' in _dict: - args['created'] = string_to_datetime(_dict.get('created')) - if 'classes' in _dict: - args['classes'] = [ - Class._from_dict(x) for x in (_dict.get('classes')) - ] - if 'retrained' in _dict: - args['retrained'] = string_to_datetime(_dict.get('retrained')) - if 'updated' in _dict: - args['updated'] = string_to_datetime(_dict.get('updated')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'classifier_id') and self.classifier_id is not None: - _dict['classifier_id'] = self.classifier_id - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'owner') and self.owner is not None: - _dict['owner'] = self.owner - if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status - if hasattr(self, - 'core_ml_enabled') and self.core_ml_enabled is not None: - _dict['core_ml_enabled'] = self.core_ml_enabled - if hasattr(self, 'explanation') and self.explanation is not None: - _dict['explanation'] = self.explanation - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'classes') and self.classes is not None: - _dict['classes'] = [x._to_dict() for x in self.classes] - if hasattr(self, 'retrained') and self.retrained is not None: - _dict['retrained'] = 
datetime_to_string(self.retrained) - if hasattr(self, 'updated') and self.updated is not None: - _dict['updated'] = datetime_to_string(self.updated) - return _dict - - def __str__(self): - """Return a `str` version of this Classifier object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ClassifierResult(object): - """ - Classifier and score combination. - - :attr str name: Name of the classifier. - :attr str classifier_id: ID of a classifier identified in the image. - :attr list[ClassResult] classes: Classes within the classifier. - """ - - def __init__(self, name, classifier_id, classes): - """ - Initialize a ClassifierResult object. - - :param str name: Name of the classifier. - :param str classifier_id: ID of a classifier identified in the image. - :param list[ClassResult] classes: Classes within the classifier. - """ - self.name = name - self.classifier_id = classifier_id - self.classes = classes - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ClassifierResult object from a json dictionary.""" - args = {} - if 'name' in _dict: - args['name'] = _dict.get('name') - else: - raise ValueError( - 'Required property \'name\' not present in ClassifierResult JSON' - ) - if 'classifier_id' in _dict: - args['classifier_id'] = _dict.get('classifier_id') - else: - raise ValueError( - 'Required property \'classifier_id\' not present in ClassifierResult JSON' - ) - if 'classes' in _dict: - args['classes'] = [ - ClassResult._from_dict(x) for x in (_dict.get('classes')) - ] - else: - raise ValueError( - 'Required property \'classes\' not present in ClassifierResult JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'name') and self.name is not None: - _dict['name'] = self.name - if hasattr(self, 'classifier_id') and self.classifier_id is not None: - _dict['classifier_id'] = self.classifier_id - if hasattr(self, 'classes') and self.classes is not None: - _dict['classes'] = [x._to_dict() for x in self.classes] - return _dict - - def __str__(self): - """Return a `str` version of this ClassifierResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Classifiers(object): - """ - A container for the list of classifiers. - - :attr list[Classifier] classifiers: List of classifiers. - """ - - def __init__(self, classifiers): - """ - Initialize a Classifiers object. - - :param list[Classifier] classifiers: List of classifiers. 
- """ - self.classifiers = classifiers - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Classifiers object from a json dictionary.""" - args = {} - if 'classifiers' in _dict: - args['classifiers'] = [ - Classifier._from_dict(x) for x in (_dict.get('classifiers')) - ] - else: - raise ValueError( - 'Required property \'classifiers\' not present in Classifiers JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'classifiers') and self.classifiers is not None: - _dict['classifiers'] = [x._to_dict() for x in self.classifiers] - return _dict - - def __str__(self): - """Return a `str` version of this Classifiers object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class DetectedFaces(object): - """ - Results for all faces. - - :attr int images_processed: Number of images processed for the API call. - :attr list[ImageWithFaces] images: The images. - :attr list[WarningInfo] warnings: (optional) Information about what might cause less - than optimal output. For example, a request sent with a corrupt .zip file and a list - of image URLs will still complete, but does not return the expected output. Not - returned when there is no warning. - """ - - def __init__(self, images_processed, images, warnings=None): - """ - Initialize a DetectedFaces object. - - :param int images_processed: Number of images processed for the API call. - :param list[ImageWithFaces] images: The images. - :param list[WarningInfo] warnings: (optional) Information about what might cause - less than optimal output. For example, a request sent with a corrupt .zip file and - a list of image URLs will still complete, but does not return the expected output. - Not returned when there is no warning. 
- """ - self.images_processed = images_processed - self.images = images - self.warnings = warnings - - @classmethod - def _from_dict(cls, _dict): - """Initialize a DetectedFaces object from a json dictionary.""" - args = {} - if 'images_processed' in _dict: - args['images_processed'] = _dict.get('images_processed') - else: - raise ValueError( - 'Required property \'images_processed\' not present in DetectedFaces JSON' - ) - if 'images' in _dict: - args['images'] = [ - ImageWithFaces._from_dict(x) for x in (_dict.get('images')) - ] - else: - raise ValueError( - 'Required property \'images\' not present in DetectedFaces JSON' - ) - if 'warnings' in _dict: - args['warnings'] = [ - WarningInfo._from_dict(x) for x in (_dict.get('warnings')) - ] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, - 'images_processed') and self.images_processed is not None: - _dict['images_processed'] = self.images_processed - if hasattr(self, 'images') and self.images is not None: - _dict['images'] = [x._to_dict() for x in self.images] - if hasattr(self, 'warnings') and self.warnings is not None: - _dict['warnings'] = [x._to_dict() for x in self.warnings] - return _dict - - def __str__(self): - """Return a `str` version of this DetectedFaces object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ErrorInfo(object): - """ - Information about what might have caused a failure, such as an image that is too - large. Not returned when there is no error. - - :attr int code: HTTP status code. - :attr str description: Human-readable error description. For example, `File size limit - exceeded`. - :attr str error_id: Codified error string. For example, `limit_exceeded`. - """ - - def __init__(self, code, description, error_id): - """ - Initialize a ErrorInfo object. - - :param int code: HTTP status code. - :param str description: Human-readable error description. For example, `File size - limit exceeded`. - :param str error_id: Codified error string. For example, `limit_exceeded`. 
- """ - self.code = code - self.description = description - self.error_id = error_id - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ErrorInfo object from a json dictionary.""" - args = {} - if 'code' in _dict: - args['code'] = _dict.get('code') - else: - raise ValueError( - 'Required property \'code\' not present in ErrorInfo JSON') - if 'description' in _dict: - args['description'] = _dict.get('description') - else: - raise ValueError( - 'Required property \'description\' not present in ErrorInfo JSON' - ) - if 'error_id' in _dict: - args['error_id'] = _dict.get('error_id') - else: - raise ValueError( - 'Required property \'error_id\' not present in ErrorInfo JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'code') and self.code is not None: - _dict['code'] = self.code - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - if hasattr(self, 'error_id') and self.error_id is not None: - _dict['error_id'] = self.error_id - return _dict - - def __str__(self): - """Return a `str` version of this ErrorInfo object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class Face(object): - """ - Information about the face. - - :attr FaceAge age: (optional) Age information about a face. - :attr FaceGender gender: (optional) Information about the gender of the face. - :attr FaceLocation face_location: (optional) The location of the bounding box around - the face. - """ - - def __init__(self, age=None, gender=None, face_location=None): - """ - Initialize a Face object. - - :param FaceAge age: (optional) Age information about a face. - :param FaceGender gender: (optional) Information about the gender of the face. - :param FaceLocation face_location: (optional) The location of the bounding box - around the face. 
- """ - self.age = age - self.gender = gender - self.face_location = face_location - - @classmethod - def _from_dict(cls, _dict): - """Initialize a Face object from a json dictionary.""" - args = {} - if 'age' in _dict: - args['age'] = FaceAge._from_dict(_dict.get('age')) - if 'gender' in _dict: - args['gender'] = FaceGender._from_dict(_dict.get('gender')) - if 'face_location' in _dict: - args['face_location'] = FaceLocation._from_dict( - _dict.get('face_location')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'age') and self.age is not None: - _dict['age'] = self.age._to_dict() - if hasattr(self, 'gender') and self.gender is not None: - _dict['gender'] = self.gender._to_dict() - if hasattr(self, 'face_location') and self.face_location is not None: - _dict['face_location'] = self.face_location._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this Face object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class FaceAge(object): - """ - Age information about a face. - - :attr int min: (optional) Estimated minimum age. - :attr int max: (optional) Estimated maximum age. - :attr float score: Confidence score in the range of 0 to 1. A higher score indicates - greater confidence in the estimated value for the property. - """ - - def __init__(self, score, min=None, max=None): - """ - Initialize a FaceAge object. - - :param float score: Confidence score in the range of 0 to 1. A higher score - indicates greater confidence in the estimated value for the property. - :param int min: (optional) Estimated minimum age. - :param int max: (optional) Estimated maximum age. - """ - self.min = min - self.max = max - self.score = score - - @classmethod - def _from_dict(cls, _dict): - """Initialize a FaceAge object from a json dictionary.""" - args = {} - if 'min' in _dict: - args['min'] = _dict.get('min') - if 'max' in _dict: - args['max'] = _dict.get('max') - if 'score' in _dict: - args['score'] = _dict.get('score') - else: - raise ValueError( - 'Required property \'score\' not present in FaceAge JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'min') and self.min is not None: - _dict['min'] = self.min - if hasattr(self, 'max') and self.max is not None: - _dict['max'] = self.max - if hasattr(self, 'score') and self.score is not None: - _dict['score'] = self.score - return _dict - - def __str__(self): - """Return a `str` version of this FaceAge object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class FaceGender(object): - """ - Information about the gender of the face. - - :attr str gender: Gender identified by the face. For example, `MALE` or `FEMALE`. 
- :attr str gender_label: The word for "male" or "female" in the language defined by the - **Accept-Language** request header. - :attr float score: Confidence score in the range of 0 to 1. A higher score indicates - greater confidence in the estimated value for the property. - """ - - def __init__(self, gender, gender_label, score): - """ - Initialize a FaceGender object. - - :param str gender: Gender identified by the face. For example, `MALE` or `FEMALE`. - :param str gender_label: The word for "male" or "female" in the language defined - by the **Accept-Language** request header. - :param float score: Confidence score in the range of 0 to 1. A higher score - indicates greater confidence in the estimated value for the property. - """ - self.gender = gender - self.gender_label = gender_label - self.score = score - - @classmethod - def _from_dict(cls, _dict): - """Initialize a FaceGender object from a json dictionary.""" - args = {} - if 'gender' in _dict: - args['gender'] = _dict.get('gender') - else: - raise ValueError( - 'Required property \'gender\' not present in FaceGender JSON') - if 'gender_label' in _dict: - args['gender_label'] = _dict.get('gender_label') - else: - raise ValueError( - 'Required property \'gender_label\' not present in FaceGender JSON' - ) - if 'score' in _dict: - args['score'] = _dict.get('score') - else: - raise ValueError( - 'Required property \'score\' not present in FaceGender JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'gender') and self.gender is not None: - _dict['gender'] = self.gender - if hasattr(self, 'gender_label') and self.gender_label is not None: - _dict['gender_label'] = self.gender_label - if hasattr(self, 'score') and self.score is not None: - _dict['score'] = self.score - return _dict - - def __str__(self): - """Return a `str` version of this FaceGender object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class FaceLocation(object): - """ - The location of the bounding box around the face. - - :attr float width: Width in pixels of face region. - :attr float height: Height in pixels of face region. - :attr float left: X-position of top-left pixel of face region. - :attr float top: Y-position of top-left pixel of face region. - """ - - def __init__(self, width, height, left, top): - """ - Initialize a FaceLocation object. - - :param float width: Width in pixels of face region. - :param float height: Height in pixels of face region. - :param float left: X-position of top-left pixel of face region. - :param float top: Y-position of top-left pixel of face region. 
- """ - self.width = width - self.height = height - self.left = left - self.top = top - - @classmethod - def _from_dict(cls, _dict): - """Initialize a FaceLocation object from a json dictionary.""" - args = {} - if 'width' in _dict: - args['width'] = _dict.get('width') - else: - raise ValueError( - 'Required property \'width\' not present in FaceLocation JSON') - if 'height' in _dict: - args['height'] = _dict.get('height') - else: - raise ValueError( - 'Required property \'height\' not present in FaceLocation JSON') - if 'left' in _dict: - args['left'] = _dict.get('left') - else: - raise ValueError( - 'Required property \'left\' not present in FaceLocation JSON') - if 'top' in _dict: - args['top'] = _dict.get('top') - else: - raise ValueError( - 'Required property \'top\' not present in FaceLocation JSON') - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'width') and self.width is not None: - _dict['width'] = self.width - if hasattr(self, 'height') and self.height is not None: - _dict['height'] = self.height - if hasattr(self, 'left') and self.left is not None: - _dict['left'] = self.left - if hasattr(self, 'top') and self.top is not None: - _dict['top'] = self.top - return _dict - - def __str__(self): - """Return a `str` version of this FaceLocation object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class ImageWithFaces(object): - """ - Information about faces in the image. - - :attr list[Face] faces: Faces detected in the images. - :attr str image: (optional) Relative path of the image file if uploaded directly. Not - returned when the image is passed by URL. - :attr str source_url: (optional) Source of the image before any redirects. Not - returned when the image is uploaded. - :attr str resolved_url: (optional) Fully resolved URL of the image after redirects are - followed. Not returned when the image is uploaded. - :attr ErrorInfo error: (optional) Information about what might have caused a failure, - such as an image that is too large. Not returned when there is no error. - """ - - def __init__(self, - faces, - image=None, - source_url=None, - resolved_url=None, - error=None): - """ - Initialize a ImageWithFaces object. - - :param list[Face] faces: Faces detected in the images. - :param str image: (optional) Relative path of the image file if uploaded directly. - Not returned when the image is passed by URL. - :param str source_url: (optional) Source of the image before any redirects. Not - returned when the image is uploaded. - :param str resolved_url: (optional) Fully resolved URL of the image after - redirects are followed. Not returned when the image is uploaded. - :param ErrorInfo error: (optional) Information about what might have caused a - failure, such as an image that is too large. Not returned when there is no error. 
- """ - self.faces = faces - self.image = image - self.source_url = source_url - self.resolved_url = resolved_url - self.error = error - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ImageWithFaces object from a json dictionary.""" - args = {} - if 'faces' in _dict: - args['faces'] = [Face._from_dict(x) for x in (_dict.get('faces'))] - else: - raise ValueError( - 'Required property \'faces\' not present in ImageWithFaces JSON' - ) - if 'image' in _dict: - args['image'] = _dict.get('image') - if 'source_url' in _dict: - args['source_url'] = _dict.get('source_url') - if 'resolved_url' in _dict: - args['resolved_url'] = _dict.get('resolved_url') - if 'error' in _dict: - args['error'] = ErrorInfo._from_dict(_dict.get('error')) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'faces') and self.faces is not None: - _dict['faces'] = [x._to_dict() for x in self.faces] - if hasattr(self, 'image') and self.image is not None: - _dict['image'] = self.image - if hasattr(self, 'source_url') and self.source_url is not None: - _dict['source_url'] = self.source_url - if hasattr(self, 'resolved_url') and self.resolved_url is not None: - _dict['resolved_url'] = self.resolved_url - if hasattr(self, 'error') and self.error is not None: - _dict['error'] = self.error._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this ImageWithFaces object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - -class WarningInfo(object): - """ - Information about something that went wrong. - - :attr str warning_id: Codified warning string, such as `limit_reached`. - :attr str description: Information about the error. - """ - - def __init__(self, warning_id, description): - """ - Initialize a WarningInfo object. - - :param str warning_id: Codified warning string, such as `limit_reached`. - :param str description: Information about the error. 
- """ - self.warning_id = warning_id - self.description = description - - @classmethod - def _from_dict(cls, _dict): - """Initialize a WarningInfo object from a json dictionary.""" - args = {} - if 'warning_id' in _dict: - args['warning_id'] = _dict.get('warning_id') - else: - raise ValueError( - 'Required property \'warning_id\' not present in WarningInfo JSON' - ) - if 'description' in _dict: - args['description'] = _dict.get('description') - else: - raise ValueError( - 'Required property \'description\' not present in WarningInfo JSON' - ) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'warning_id') and self.warning_id is not None: - _dict['warning_id'] = self.warning_id - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description - return _dict - - def __str__(self): - """Return a `str` version of this WarningInfo object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other diff --git a/ibm_watson/websocket/__init__.py b/ibm_watson/websocket/__init__.py index ed6564545..f50ad9fdf 100644 --- a/ibm_watson/websocket/__init__.py +++ b/ibm_watson/websocket/__init__.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2018, 2020. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/ibm_watson/websocket/audio_source.py b/ibm_watson/websocket/audio_source.py index b33930578..181eeab18 100644 --- a/ibm_watson/websocket/audio_source.py +++ b/ibm_watson/websocket/audio_source.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2018, 2020. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + class AudioSource(object): """"Audio source for the speech to text recognize using websocket""" diff --git a/ibm_watson/websocket/recognize_abstract_callback.py b/ibm_watson/websocket/recognize_abstract_callback.py index ffbb4bfeb..a8574c6d0 100644 --- a/ibm_watson/websocket/recognize_abstract_callback.py +++ b/ibm_watson/websocket/recognize_abstract_callback.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2018, 2020. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,53 +16,46 @@ class RecognizeCallback(object): + def __init__(self): pass def on_transcription(self, transcript): """ - Called after the service returns the final result for the transcription. - """ - pass + Called after the service returns the final result for the transcription. + """ def on_connected(self): """ - Called when a Websocket connection was made - """ - pass + Called when a Websocket connection was made + """ def on_error(self, error): """ - Called when there is an error in the Websocket connection. 
- """ - pass + Called when there is an error in the Websocket connection. + """ def on_inactivity_timeout(self, error): """ - Called when there is an inactivity timeout. - """ - pass + Called when there is an inactivity timeout. + """ def on_listening(self): """ - Called when the service is listening for audio. - """ - pass + Called when the service is listening for audio. + """ def on_hypothesis(self, hypothesis): """ - Called when an interim result is received. - """ - pass + Called when an interim result is received. + """ def on_data(self, data): """ - Called when the service returns results. The data is returned unparsed. - """ - pass + Called when the service returns results. The data is returned unparsed. + """ def on_close(self): """ - Called when the Websocket connection is closed - """ - pass + Called when the Websocket connection is closed + """ diff --git a/ibm_watson/websocket/recognize_listener.py b/ibm_watson/websocket/recognize_listener.py index e1471c0cb..43eb79618 100644 --- a/ibm_watson/websocket/recognize_listener.py +++ b/ibm_watson/websocket/recognize_listener.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2018, 2020. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -31,7 +31,9 @@ START = "start" STOP = "stop" + class RecognizeListener(object): + def __init__(self, audio_source, options, @@ -51,8 +53,6 @@ def __init__(self, self.isListening = False self.verify = verify - # websocket.enableTrace(True) - self.ws_client = websocket.WebSocketApp( self.url, header=self.headers, @@ -64,7 +64,8 @@ def __init__(self, self.ws_client.run_forever(http_proxy_host=self.http_proxy_host, http_proxy_port=self.http_proxy_port, - sslopt={"cert_reqs": ssl.CERT_NONE} if self.verify is not None else None) + sslopt={"cert_reqs": ssl.CERT_NONE} + if self.verify is not None else None) @classmethod def build_start_message(cls, options): @@ -102,6 +103,7 @@ def send_audio(self, ws): :param ws: Websocket client """ + def run(*args): """Background process to stream the data""" if not self.audio_source.is_buffer: @@ -118,7 +120,8 @@ def run(*args): try: if not self.audio_source.input.empty(): chunk = self.audio_source.input.get() - self.ws_client.send(chunk, websocket.ABNF.OPCODE_BINARY) + self.ws_client.send(chunk, + websocket.ABNF.OPCODE_BINARY) time.sleep(TEN_MILLISECONDS) if self.audio_source.input.empty(): if self.audio_source.is_recording: @@ -132,7 +135,8 @@ def run(*args): break time.sleep(TEN_MILLISECONDS) - self.ws_client.send(self.build_closing_message(), websocket.ABNF.OPCODE_TEXT) + self.ws_client.send(self.build_closing_message(), + websocket.ABNF.OPCODE_TEXT) thread.start_new_thread(run, ()) @@ -147,7 +151,8 @@ def on_open(self, ws): # Send initialization message init_data = self.build_start_message(self.options) - self.ws_client.send(json.dumps(init_data).encode('utf8'), websocket.ABNF.OPCODE_TEXT) + self.ws_client.send( + json.dumps(init_data).encode('utf8'), websocket.ABNF.OPCODE_TEXT) def on_data(self, ws, message, message_type, fin): """ @@ -187,18 +192,30 @@ def on_data(self, ws, message, message_type, fin): # if in streaming elif 'results' in json_object or 'speaker_labels' in json_object: - hypothesis = '' - if 'results' in json_object: - hypothesis = json_object['results'][0]['alternatives'][0][ - 'transcript'] - b_final = (json_object['results'][0]['final'] is True) - transcripts = self.extract_transcripts( - 
json_object['results'][0]['alternatives']) - - if b_final: - self.callback.on_transcription(transcripts) - - self.callback.on_hypothesis(hypothesis) + # If results are present, extract the hypothesis and, if finalized, the full + # set of transcriptions and send them to the appropriate callbacks. + results = json_object.get('results') + if results: + if (self.options.get('interim_results') is True): + b_final = (results[0].get('final') is True) + alternatives = results[0].get('alternatives') + if alternatives: + hypothesis = alternatives[0].get('transcript') + transcripts = self.extract_transcripts(alternatives) + if b_final: + self.callback.on_transcription(transcripts) + if hypothesis: + self.callback.on_hypothesis(hypothesis) + else: + final_transcript = [] + for result in results: + transcript = self.extract_transcripts( + result.get('alternatives')) + final_transcript.append(transcript) + + self.callback.on_transcription(final_transcript) + + # Always call the on_data callback if 'results' or 'speaker_labels' are present self.callback.on_data(json_object) def on_error(self, ws, error): @@ -210,7 +227,7 @@ def on_error(self, ws, error): """ self.callback.on_error(error) - def on_close(self, ws): + def on_close(self, ws, *args): """ Callback executed when websocket connection is closed diff --git a/ibm_watson/websocket/synthesize_callback.py b/ibm_watson/websocket/synthesize_callback.py index 70c7a6075..ec62ea493 100644 --- a/ibm_watson/websocket/synthesize_callback.py +++ b/ibm_watson/websocket/synthesize_callback.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2018, 2020. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,48 +16,41 @@ class SynthesizeCallback(object): + def __init__(self): pass def on_connected(self): """ - Called when a Websocket connection was made - """ - pass + Called when a Websocket connection was made + """ def on_error(self, error): """ - Called when there is an error in the Websocket connection. - """ - pass - + Called when there is an error in the Websocket connection. + """ def on_content_type(self, content_type): """ - Called when the service responds with the format of the audio response - """ - pass + Called when the service responds with the format of the audio response + """ def on_timing_information(self, timing_information): """ - Called when the service returns timing information - """ - pass + Called when the service returns timing information + """ def on_audio_stream(self, audio_stream): """ - Called when the service sends the synthesized audio as a binary stream of data in the indicated format. - """ - pass + Called when the service sends the synthesized audio as a binary stream of data in the indicated format. + """ def on_data(self, data): """ - Called when the service returns results. The data is returned unparsed. - """ - pass + Called when the service returns results. The data is returned unparsed. + """ def on_close(self): """ - Called when the Websocket connection is closed - """ - pass + Called when the Websocket connection is closed + """ diff --git a/ibm_watson/websocket/synthesize_listener.py b/ibm_watson/websocket/synthesize_listener.py index 905e02ba3..33caf81d5 100644 --- a/ibm_watson/websocket/synthesize_listener.py +++ b/ibm_watson/websocket/synthesize_listener.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2018 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2018, 2020. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,10 +23,11 @@ except ImportError: import _thread as thread - TEN_MILLISECONDS = 0.01 + class SynthesizeListener(object): + def __init__(self, options, callback, @@ -43,8 +44,6 @@ def __init__(self, self.http_proxy_port = http_proxy_port self.verify = verify - # websocket.enableTrace(True) - self.ws_client = websocket.WebSocketApp( self.url, header=self.headers, @@ -56,13 +55,16 @@ def __init__(self, self.ws_client.run_forever(http_proxy_host=self.http_proxy_host, http_proxy_port=self.http_proxy_port, - sslopt={'cert_reqs': ssl.CERT_NONE} if self.verify is not None else None) + suppress_origin=True, + sslopt={'cert_reqs': ssl.CERT_NONE} + if self.verify is not None else None) def send_text(self): """ Sends the text message Note: The service handles one request per connection """ + def run(*args): """Background process to send the text""" self.ws_client.send(json.dumps(self.options).encode('utf8')) @@ -94,7 +96,8 @@ def on_data(self, ws, message, message_type, fin): if message_type == websocket.ABNF.OPCODE_TEXT: json_object = json.loads(message) if 'binary_streams' in json_object: - self.callback.on_content_type(json_object['binary_streams'][0]['content_type']) + self.callback.on_content_type( + json_object['binary_streams'][0]['content_type']) elif 'error' in json_object: self.on_error(ws, json_object.get('error')) return @@ -117,7 +120,7 @@ def on_error(self, ws, error): """ self.callback.on_error(error) - def on_close(self, ws, **kwargs): + def on_close(self, ws, *args, **kwargs): """ Callback executed when websocket connection is closed diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 000000000..073021c0e --- /dev/null +++ b/package-lock.json @@ -0,0 +1,838 @@ +{ + "requires": true, + "lockfileVersion": 1, + "dependencies": { + "@babel/code-frame": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.14.5.tgz", + "integrity": "sha512-9pzDqyc6OLDaqe+zbACgFkb6fKMNG6CObKpnYXChRsvYGyEdc7CA2BaqeOM+vOtCS5ndmJicPJhKAwYRI6UfFw==", + "requires": { + "@babel/highlight": "^7.14.5" + } + }, + "@babel/helper-validator-identifier": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.5.tgz", + "integrity": "sha512-5lsetuxCLilmVGyiLEfoHBRX8UCFD+1m2x3Rj97WrW3V7H3u4RWRXA4evMjImCsin2J2YT0QaVDGf+z8ondbAg==" + }, + "@babel/highlight": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.14.5.tgz", + "integrity": "sha512-qf9u2WFWVV0MppaL877j2dBtQIDgmidgjGk5VIMw3OadXvYaXn66U1BFlH2t4+t3i+8PhedppRv+i40ABzd+gg==", + "requires": { + "@babel/helper-validator-identifier": "^7.14.5", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + } + }, + "@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "requires": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + } + }, + "@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==" + }, + "@nodelib/fs.walk": { + "version": "1.2.7", + "resolved": 
"https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.7.tgz", + "integrity": "sha512-BTIhocbPBSrRmHxOAJFtR18oLhxTtAFDAvL8hY1S3iU8k+E60W/YFs4jrixGzQjMpF4qPXxIQHcjVD9dz1C2QA==", + "requires": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + } + }, + "@octokit/auth-token": { + "version": "2.4.5", + "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-2.4.5.tgz", + "integrity": "sha512-BpGYsPgJt05M7/L/5FoE1PiAbdxXFZkX/3kDYcsvd1v6UhlnE5e96dTDr0ezX/EFwciQxf3cNV0loipsURU+WA==", + "requires": { + "@octokit/types": "^6.0.3" + } + }, + "@octokit/core": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-3.4.0.tgz", + "integrity": "sha512-6/vlKPP8NF17cgYXqucdshWqmMZGXkuvtcrWCgU5NOI0Pl2GjlmZyWgBMrU8zJ3v2MJlM6++CiB45VKYmhiWWg==", + "requires": { + "@octokit/auth-token": "^2.4.4", + "@octokit/graphql": "^4.5.8", + "@octokit/request": "^5.4.12", + "@octokit/request-error": "^2.0.5", + "@octokit/types": "^6.0.3", + "before-after-hook": "^2.2.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/endpoint": { + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-6.0.11.tgz", + "integrity": "sha512-fUIPpx+pZyoLW4GCs3yMnlj2LfoXTWDUVPTC4V3MUEKZm48W+XYpeWSZCv+vYF1ZABUm2CqnDVf1sFtIYrj7KQ==", + "requires": { + "@octokit/types": "^6.0.3", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/graphql": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-4.6.2.tgz", + "integrity": "sha512-WmsIR1OzOr/3IqfG9JIczI8gMJUMzzyx5j0XXQ4YihHtKlQc+u35VpVoOXhlKAlaBntvry1WpAzPl/a+s3n89Q==", + "requires": { + "@octokit/request": "^5.3.0", + "@octokit/types": "^6.0.3", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/openapi-types": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-7.3.0.tgz", + "integrity": "sha512-o00X2FCLiEeXZkm1Ab5nvPUdVOlrpediwWZkpizUJ/xtZQsJ4FiQ2RB/dJEmb0Nk+NIz7zyDePcSCu/Y/0M3Ew==" + }, + "@octokit/plugin-paginate-rest": { + "version": "2.13.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.13.3.tgz", + "integrity": "sha512-46lptzM9lTeSmIBt/sVP/FLSTPGx6DCzAdSX3PfeJ3mTf4h9sGC26WpaQzMEq/Z44cOcmx8VsOhO+uEgE3cjYg==", + "requires": { + "@octokit/types": "^6.11.0" + } + }, + "@octokit/plugin-request-log": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-1.0.3.tgz", + "integrity": "sha512-4RFU4li238jMJAzLgAwkBAw+4Loile5haQMQr+uhFq27BmyJXcXSKvoQKqh0agsZEiUlW6iSv3FAgvmGkur7OQ==" + }, + "@octokit/plugin-rest-endpoint-methods": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-5.3.1.tgz", + "integrity": "sha512-3B2iguGmkh6bQQaVOtCsS0gixrz8Lg0v4JuXPqBcFqLKuJtxAUf3K88RxMEf/naDOI73spD+goJ/o7Ie7Cvdjg==", + "requires": { + "@octokit/types": "^6.16.2", + "deprecation": "^2.3.1" + } + }, + "@octokit/request": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-5.5.0.tgz", + "integrity": "sha512-jxbMLQdQ3heFMZUaTLSCqcKs2oAHEYh7SnLLXyxbZmlULExZ/RXai7QUWWFKowcGGPlCZuKTZg0gSKHWrfYEoQ==", + "requires": { + "@octokit/endpoint": "^6.0.1", + "@octokit/request-error": "^2.0.0", + "@octokit/types": "^6.16.1", + "is-plain-object": "^5.0.0", + "node-fetch": "^2.6.1", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/request-error": { + "version": 
"2.0.5", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-2.0.5.tgz", + "integrity": "sha512-T/2wcCFyM7SkXzNoyVNWjyVlUwBvW3igM3Btr/eKYiPmucXTtkxt2RBsf6gn3LTzaLSLTQtNmvg+dGsOxQrjZg==", + "requires": { + "@octokit/types": "^6.0.3", + "deprecation": "^2.0.0", + "once": "^1.4.0" + } + }, + "@octokit/rest": { + "version": "18.5.6", + "resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-18.5.6.tgz", + "integrity": "sha512-8HdG6ZjQdZytU6tCt8BQ2XLC7EJ5m4RrbyU/EARSkAM1/HP3ceOzMG/9atEfe17EDMer3IVdHWLedz2wDi73YQ==", + "requires": { + "@octokit/core": "^3.2.3", + "@octokit/plugin-paginate-rest": "^2.6.2", + "@octokit/plugin-request-log": "^1.0.2", + "@octokit/plugin-rest-endpoint-methods": "5.3.1" + } + }, + "@octokit/types": { + "version": "6.16.2", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-6.16.2.tgz", + "integrity": "sha512-wWPSynU4oLy3i4KGyk+J1BLwRKyoeW2TwRHgwbDz17WtVFzSK2GOErGliruIx8c+MaYtHSYTx36DSmLNoNbtgA==", + "requires": { + "@octokit/openapi-types": "^7.2.3" + } + }, + "@semantic-release/changelog": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@semantic-release/changelog/-/changelog-5.0.1.tgz", + "integrity": "sha512-unvqHo5jk4dvAf2nZ3aw4imrlwQ2I50eVVvq9D47Qc3R+keNqepx1vDYwkjF8guFXnOYaYcR28yrZWno1hFbiw==", + "requires": { + "@semantic-release/error": "^2.1.0", + "aggregate-error": "^3.0.0", + "fs-extra": "^9.0.0", + "lodash": "^4.17.4" + } + }, + "@semantic-release/error": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-2.2.0.tgz", + "integrity": "sha512-9Tj/qn+y2j+sjCI3Jd+qseGtHjOAeg7dU2/lVcqIQ9TV3QDaDXDYXcoOHU+7o2Hwh8L8ymL4gfuO7KxDs3q2zg==" + }, + "@semantic-release/exec": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@semantic-release/exec/-/exec-5.0.0.tgz", + "integrity": "sha512-t7LWXIvDJQbuGCy2WmMG51WyaGSLTvZBv9INvcI4S0kn+QjnnVVUMhcioIqhb0r3yqqarMzHVcABFug0q0OXjw==", + "requires": { + "@semantic-release/error": "^2.1.0", + "aggregate-error": "^3.0.0", + "debug": "^4.0.0", + "execa": "^4.0.0", + "lodash": "^4.17.4", + "parse-json": "^5.0.0" + } + }, + "@semantic-release/git": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/@semantic-release/git/-/git-9.0.0.tgz", + "integrity": "sha512-AZ4Zha5NAPAciIJH3ipzw/WU9qLAn8ENaoVAhD6srRPxTpTzuV3NhNh14rcAo8Paj9dO+5u4rTKcpetOBluYVw==", + "requires": { + "@semantic-release/error": "^2.1.0", + "aggregate-error": "^3.0.0", + "debug": "^4.0.0", + "dir-glob": "^3.0.0", + "execa": "^4.0.0", + "lodash": "^4.17.4", + "micromatch": "^4.0.0", + "p-reduce": "^2.0.0" + } + }, + "@semantic-release/github": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-7.2.3.tgz", + "integrity": "sha512-lWjIVDLal+EQBzy697ayUNN8MoBpp+jYIyW2luOdqn5XBH4d9bQGfTnjuLyzARZBHejqh932HVjiH/j4+R7VHw==", + "requires": { + "@octokit/rest": "^18.0.0", + "@semantic-release/error": "^2.2.0", + "aggregate-error": "^3.0.0", + "bottleneck": "^2.18.1", + "debug": "^4.0.0", + "dir-glob": "^3.0.0", + "fs-extra": "^10.0.0", + "globby": "^11.0.0", + "http-proxy-agent": "^4.0.0", + "https-proxy-agent": "^5.0.0", + "issue-parser": "^6.0.0", + "lodash": "^4.17.4", + "mime": "^2.4.3", + "p-filter": "^2.0.0", + "p-retry": "^4.0.0", + "url-join": "^4.0.0" + }, + "dependencies": { + "fs-extra": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz", + "integrity": 
"sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==", + "requires": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + } + } + } + }, + "@tootallnate/once": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz", + "integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==" + }, + "@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" + }, + "agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "requires": { + "debug": "4" + } + }, + "aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "requires": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + } + }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "requires": { + "color-convert": "^1.9.0" + } + }, + "array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==" + }, + "at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==" + }, + "before-after-hook": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.2.tgz", + "integrity": "sha512-3pZEU3NT5BFUo/AD5ERPWOgQOCZITni6iavr5AUw5AUwQjMlI0kzu5btnyD39AF0gUEsDPwJT+oY1ORBJijPjQ==" + }, + "bottleneck": { + "version": "2.19.5", + "resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz", + "integrity": "sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw==" + }, + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "requires": { + "fill-range": "^7.0.1" + } + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==" + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": 
"sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" + }, + "cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "requires": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "requires": { + "ms": "2.1.2" + } + }, + "deprecation": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==" + }, + "dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "requires": { + "path-type": "^4.0.0" + } + }, + "end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "requires": { + "once": "^1.4.0" + } + }, + "error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "requires": { + "is-arrayish": "^0.2.1" + } + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" + }, + "execa": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-4.1.0.tgz", + "integrity": "sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==", + "requires": { + "cross-spawn": "^7.0.0", + "get-stream": "^5.0.0", + "human-signals": "^1.1.1", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.0", + "onetime": "^5.1.0", + "signal-exit": "^3.0.2", + "strip-final-newline": "^2.0.0" + } + }, + "fast-glob": { + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.5.tgz", + "integrity": "sha512-2DtFcgT68wiTTiwZ2hNdJfcHNke9XOfnwmBRWXhmeKM8rF0TGwmC/Qto3S7RoZKp5cilZbxzO5iTNTQsJ+EeDg==", + "requires": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.0", + "merge2": "^1.3.0", + "micromatch": "^4.0.2", + "picomatch": "^2.2.1" + } + }, + "fastq": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.11.0.tgz", + "integrity": "sha512-7Eczs8gIPDrVzT+EksYBcupqMyxSHXXrHOLRRxU2/DicV8789MRBRR8+Hc2uWzUupOs4YS4JzBmBxjjCVBxD/g==", + "requires": { + "reusify": "^1.0.4" + } + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": 
"sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "requires": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + } + }, + "get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "requires": { + "pump": "^3.0.0" + } + }, + "glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "requires": { + "is-glob": "^4.0.1" + } + }, + "globby": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.0.3.tgz", + "integrity": "sha512-ffdmosjA807y7+lA1NM0jELARVmYul/715xiILEjo3hBLPTcirgQNnXECn5g3mtR8TOLCVbkfua1Hpen25/Xcg==", + "requires": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.1.1", + "ignore": "^5.1.4", + "merge2": "^1.3.0", + "slash": "^3.0.0" + } + }, + "graceful-fs": { + "version": "4.2.6", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", + "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ==" + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" + }, + "http-proxy-agent": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", + "integrity": "sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==", + "requires": { + "@tootallnate/once": "1", + "agent-base": "6", + "debug": "4" + } + }, + "https-proxy-agent": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz", + "integrity": "sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA==", + "requires": { + "agent-base": "6", + "debug": "4" + } + }, + "human-signals": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-1.1.1.tgz", + "integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==" + }, + "ignore": { + "version": "5.1.8", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", + "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==" + }, + "indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==" + }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=" + }, + "is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": 
"sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=" + }, + "is-glob": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", + "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "requires": { + "is-extglob": "^2.1.1" + } + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==" + }, + "is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==" + }, + "is-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", + "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==" + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" + }, + "issue-parser": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/issue-parser/-/issue-parser-6.0.0.tgz", + "integrity": "sha512-zKa/Dxq2lGsBIXQ7CUZWTHfvxPC2ej0KfO7fIPqLlHB9J2hJ7rGhZ5rilhuufylr4RXYPzJUeFjKxz305OsNlA==", + "requires": { + "lodash.capitalize": "^4.2.1", + "lodash.escaperegexp": "^4.1.2", + "lodash.isplainobject": "^4.0.6", + "lodash.isstring": "^4.0.1", + "lodash.uniqby": "^4.7.0" + } + }, + "js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "requires": { + "graceful-fs": "^4.1.6", + "universalify": "^2.0.0" + } + }, + "lines-and-columns": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", + "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=" + }, + "lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "lodash.capitalize": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/lodash.capitalize/-/lodash.capitalize-4.2.1.tgz", + "integrity": "sha1-+CbJtOKoUR2E46yinbBeGk87cqk=" + }, + "lodash.escaperegexp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz", + "integrity": "sha1-ZHYsSGGAglGKw99Mz11YhtriA0c=" + }, + "lodash.isplainobject": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=" + }, + "lodash.isstring": { + "version": "4.0.1", + "resolved": 
"https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", + "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=" + }, + "lodash.uniqby": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz", + "integrity": "sha1-2ZwHpmnp5tJOE2Lf4mbGdhavEwI=" + }, + "merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==" + }, + "micromatch": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.4.tgz", + "integrity": "sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==", + "requires": { + "braces": "^3.0.1", + "picomatch": "^2.2.3" + } + }, + "mime": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/mime/-/mime-2.5.2.tgz", + "integrity": "sha512-tqkh47FzKeCPD2PUiPB6pkbMzsCasjxAfC62/Wap5qrUWcb+sFasXUC5I3gYM5iBM8v/Qpn4UK0x+j0iHyFPDg==" + }, + "mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==" + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node-fetch": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", + "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw==" + }, + "npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "requires": { + "path-key": "^3.0.0" + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "requires": { + "wrappy": "1" + } + }, + "onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "requires": { + "mimic-fn": "^2.1.0" + } + }, + "p-filter": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-filter/-/p-filter-2.1.0.tgz", + "integrity": "sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw==", + "requires": { + "p-map": "^2.0.0" + } + }, + "p-map": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz", + "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==" + }, + "p-reduce": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-2.1.0.tgz", + "integrity": "sha512-2USApvnsutq8uoxZBGbbWM0JIYLiEMJ9RlaN7fAzVNb9OZN0SHjjTTfIcb667XynS5Y1VhwDJVDa72TnPzAYWw==" + }, + "p-retry": { + "version": "4.5.0", + "resolved": 
"https://registry.npmjs.org/p-retry/-/p-retry-4.5.0.tgz", + "integrity": "sha512-5Hwh4aVQSu6BEP+w2zKlVXtFAaYQe1qWuVADSgoeVlLjwe/Q/AMSoRR4MDeaAfu8llT+YNbEijWu/YF3m6avkg==", + "requires": { + "@types/retry": "^0.12.0", + "retry": "^0.12.0" + } + }, + "parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "requires": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + } + }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==" + }, + "path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==" + }, + "picomatch": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", + "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==" + }, + "pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==" + }, + "retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", + "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=" + }, + "reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==" + }, + "run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "requires": { + "queue-microtask": "^1.2.2" + } + }, + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==" + }, + "signal-exit": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", + "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" + }, + "slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==" + 
}, + "strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==" + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "requires": { + "has-flag": "^3.0.0" + } + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "requires": { + "is-number": "^7.0.0" + } + }, + "universal-user-agent": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz", + "integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==" + }, + "universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==" + }, + "url-join": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz", + "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==" + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "requires": { + "isexe": "^2.0.0" + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + } + } +} diff --git a/pylint.sh b/pylint.sh index 906f3921b..ad71867fb 100644 --- a/pylint.sh +++ b/pylint.sh @@ -1,8 +1,8 @@ #!/bin/bash -# Runs pylint only for Python 2.7.X +# Runs pylint only for Python 3.7 PYTHON_VERSION=$(python -c 'import sys; print(".".join(map(str, sys.version_info[:2])))') echo "Python version: $PYTHON_VERSION" -if [ $PYTHON_VERSION = '2.7' ]; then +if [ $PYTHON_VERSION = '3.7' ]; then pylint ibm_watson test examples fi diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..09eb447dc --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + +[tool.semantic_release] +version_variables = [ + "setup.py:__version__", + "ibm_watson/version.py:__version__", +] +version_toml = [] +branch = "master" + +[tool.semantic_release.changelog] +exclude_commit_patterns = [ + '''chore(?:\([^)]*?\))?: .+''', + '''ci(?:\([^)]*?\))?: .+''', + '''refactor(?:\([^)]*?\))?: .+''', + '''test(?:\([^)]*?\))?: .+''', + '''build\((?!deps\): .+)''', +] \ No newline at end of file diff --git a/requirements-dev.txt b/requirements-dev.txt index 364178644..f04883ea7 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,21 +1,20 @@ # test dependencies -pytest>=2.8.2 -responses==0.9.0 -python_dotenv>=0.1.5;python_version!='3.2' -pylint>=1.4.4 -tox>=2.9.1 -pytest-rerunfailures>=3.1 -ibm_cloud_sdk_core>=0.2.0 +pytest==6.2.4 +responses==0.13.3 +python_dotenv==0.17.1;python_version!='3.2' +pylint==2.8.2 
+pytest-rerunfailures==9.1.1 +ibm_cloud_sdk_core>=3.3.6, == 3.* # code coverage -coverage<5 +coverage>=4, <5 codecov>=1.6.3 pytest-cov>=2.2.1 # documentation -recommonmark>=0.2.0 -Sphinx>=1.3.1 -bumpversion>=0.5.3 +recommonmark==0.7.1 +Sphinx==3.5.2 +bumpversion==0.6.0 # Web sockets -websocket-client==0.48.0 +websocket-client>=1.1.0 diff --git a/requirements.txt b/requirements.txt index 9c43ea3ed..461b8746a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ requests>=2.0,<3.0 python_dateutil>=2.5.3 -websocket-client==0.48.0 -ibm_cloud_sdk_core>=0.2.0 \ No newline at end of file +websocket-client>=1.1.0 +ibm_cloud_sdk_core>=3.3.6, == 3.* diff --git a/resources/South_Africa_Luca_Galuzzi_2004.JPG b/resources/South_Africa_Luca_Galuzzi_2004.JPG new file mode 100755 index 000000000..673758771 Binary files /dev/null and b/resources/South_Africa_Luca_Galuzzi_2004.JPG differ diff --git a/resources/South_Africa_Luca_Galuzzi_2004.jpeg b/resources/South_Africa_Luca_Galuzzi_2004.jpeg new file mode 100755 index 000000000..673758771 Binary files /dev/null and b/resources/South_Africa_Luca_Galuzzi_2004.jpeg differ diff --git a/resources/TestEnrichments.csv b/resources/TestEnrichments.csv new file mode 100644 index 000000000..0acd7812b --- /dev/null +++ b/resources/TestEnrichments.csv @@ -0,0 +1,2 @@ +engine,gasket,piston,valves +flag,green,yellow,red \ No newline at end of file diff --git a/resources/hello_world.txt b/resources/hello_world.txt new file mode 100644 index 000000000..3b18e512d --- /dev/null +++ b/resources/hello_world.txt @@ -0,0 +1 @@ +hello world diff --git a/resources/ibm-credentials.env b/resources/ibm-credentials.env deleted file mode 100644 index 008cb4f94..000000000 --- a/resources/ibm-credentials.env +++ /dev/null @@ -1,4 +0,0 @@ -VISUAL_RECOGNITION_APIKEY=1234abcd -VISUAL_RECOGNITION_URL=https://stgwat-us-south-mzr-cruiser6.us-south.containers.cloud.ibm.com/visual-recognition/api -WATSON_APIKEY=5678efgh -WATSON_URL=https://gateway-s.watsonplatform.net/watson/api \ No newline at end of file diff --git a/resources/my-giraffe.jpeg b/resources/my-giraffe.jpeg new file mode 100644 index 000000000..ebed77d35 Binary files /dev/null and b/resources/my-giraffe.jpeg differ diff --git a/resources/personality-v3-es.txt b/resources/personality-v3-es.txt deleted file mode 100644 index 950fdb28e..000000000 --- a/resources/personality-v3-es.txt +++ /dev/null @@ -1,13 +0,0 @@ -En un lugar de la Mancha, de cuyo nombre no quiero acordarme, no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero, adarga antigua, rocín flaco y galgo corredor. Una olla de algo más vaca que carnero, salpicón las más noches, duelos y quebrantos los sábados, lantejas los viernes, algún palomino de añadidura los domingos, consumían las tres partes de su hacienda. El resto della concluían sayo de velarte, calzas de velludo para las fiestas, con sus pantuflos de lo mesmo, y los días de entresemana se honraba con su vellorí de lo más fino. Tenía en su casa una ama que pasaba de los cuarenta, y una sobrina que no llegaba a los veinte, y un mozo de campo y plaza, que así ensillaba el rocín como tomaba la podadera. Frisaba la edad de nuestro hidalgo con los cincuenta años; era de complexión recia, seco de carnes, enjuto de rostro, gran madrugador y amigo de la caza. Quieren decir que tenía el sobrenombre de Quijada, o Quesada, que en esto hay alguna diferencia en los autores que deste caso escriben; aunque, por conjeturas verosímiles, se deja entender que se llamaba Quejana. 
Pero esto importa poco a nuestro cuento; basta que en la narración dél no se salga un punto de la verdad. -Es, pues, de saber que este sobredicho hidalgo, los ratos que estaba ocioso, que eran los más del año, se daba a leer libros de caballerías, con tanta afición y gusto, que olvidó casi de todo punto el ejercicio de la caza, y aun la administración de su hacienda. Y llegó a tanto su curiosidad y desatino en esto, que vendió muchas hanegas de tierra de sembradura para comprar libros de caballerías en que leer, y así, llevó a su casa todos cuantos pudo haber dellos; y de todos, ningunos le parecían tan bien como los que compuso el famoso Feliciano de Silva, porque la claridad de su prosa y aquellas entricadas razones suyas le parecían de perlas, y más cuando llegaba a leer aquellos requiebros y cartas de desafíos, donde en muchas partes hallaba escrito: La razón de la sinrazón que a mi razón se hace, de tal manera mi razón enflaquece, que con razón me quejo de la vuestra fermosura. Y también cuando leía: ...los altos cielos que de vuestra divinidad divinamente con las estrellas os fortifican, y os hacen merecedora del merecimiento que merece la vuestra grandeza. -Con estas razones perdía el pobre caballero el juicio, y desvelábase por entenderlas y desentrañarles el sentido, que no se lo sacara ni las entendiera el mesmo Aristóteles, si resucitara para sólo ello. No estaba muy bien con las heridas que don Belianís daba y recebía, porque se imaginaba que, por grandes maestros que le hubiesen curado, no dejaría de tener el rostro y todo el cuerpo lleno de cicatrices y señales. Pero, con todo, alababa en su autor aquel acabar su libro con la promesa de aquella inacabable aventura, y muchas veces le vino deseo de tomar la pluma y dalle fin al pie de la letra, como allí se promete; y sin duda alguna lo hiciera, y aun saliera con ello, si otros mayores y continuos pensamientos no se lo estorbaran. Tuvo muchas veces competencia con el cura de su lugar —que era hombre docto, graduado en Sigüenza—, sobre cuál había sido mejor caballero: Palmerín de Ingalaterra o Amadís de Gaula; mas maese Nicolás, barbero del mesmo pueblo, decía que ninguno llegaba al Caballero del Febo, y que si alguno se le podía comparar, era don Galaor, hermano de Amadís de Gaula, porque tenía muy acomodada condición para todo; que no era caballero melindroso, ni tan llorón como su hermano, y que en lo de la valentía no le iba en zaga. -En resolución, él se enfrascó tanto en su letura, que se le pasaban las noches leyendo de claro en claro, y los días de turbio en turbio; y así, del poco dormir y del mucho leer, se le secó el celebro, de manera que vino a perder el juicio. Llenósele la fantasía de todo aquello que leía en los libros, así de encantamentos como de pendencias, batallas, desafíos, heridas, requiebros, amores, tormentas y disparates imposibles; y asentósele de tal modo en la imaginación que era verdad toda aquella máquina de aquellas sonadas soñadas invenciones que leía, que para él no había otra historia más cierta en el mundo. Decía él que el Cid Ruy Díaz había sido muy buen caballero, pero que no tenía que ver con el Caballero de la Ardiente Espada, que de sólo un revés había partido por medio dos fieros y descomunales gigantes. Mejor estaba con Bernardo del Carpio, porque en Roncesvalles había muerto a Roldán el encantado, valiéndose de la industria de Hércules, cuando ahogó a Anteo, el hijo de la Tierra, entre los brazos. 
Decía mucho bien del gigante Morgante, porque, con ser de aquella generación gigantea, que todos son soberbios y descomedidos, él solo era afable y bien criado. Pero, sobre todos, estaba bien con Reinaldos de Montalbán, y más cuando le veía salir de su castillo y robar cuantos topaba, y cuando en allende robó aquel ídolo de Mahoma que era todo de oro, según dice su historia. Diera él, por dar una mano de coces al traidor de Galalón, al ama que tenía, y aun a su sobrina de añadidura. -En efeto, rematado ya su juicio, vino a dar en el más estraño pensamiento que jamás dio loco en el mundo; y fue que le pareció convenible y necesario, así para el aumento de su honra como para el servicio de su república, hacerse caballero andante, y irse por todo el mundo con sus armas y caballo a buscar las aventuras y a ejercitarse en todo aquello que él había leído que los caballeros andantes se ejercitaban, deshaciendo todo género de agravio, y poniéndose en ocasiones y peligros donde, acabándolos, cobrase eterno nombre y fama. Imaginábase el pobre ya coronado por el valor de su brazo, por lo menos, del imperio de Trapisonda; y así, con estos tan agradables pensamientos, llevado del estraño gusto que en ellos sentía, se dio priesa a poner en efeto lo que deseaba. -Y lo primero que hizo fue limpiar unas armas que habían sido de sus bisabuelos, que, tomadas de orín y llenas de moho, luengos siglos había que estaban puestas y olvidadas en un rincón. Limpiólas y aderezólas lo mejor que pudo, pero vio que tenían una gran falta, y era que no tenían celada de encaje, sino morrión simple; mas a esto suplió su industria, porque de cartones hizo un modo de media celada, que, encajada con el morrión, hacían una apariencia de celada entera. Es verdad que para probar si era fuerte y podía estar al riesgo de una cuchillada, sacó su espada y le dio dos golpes, y con el primero y en un punto deshizo lo que había hecho en una semana; y no dejó de parecerle mal la facilidad con que la había hecho pedazos, y, por asegurarse deste peligro, la tornó a hacer de nuevo, poniéndole unas barras de hierro por de dentro, de tal manera que él quedó satisfecho de su fortaleza; y, sin querer hacer nueva experiencia della, la diputó y tuvo por celada finísima de encaje. -Fue luego a ver su rocín, y, aunque tenía más cuartos que un real y más tachas que el caballo de Gonela, que tantum pellis et ossa fuit, le pareció que ni el Bucéfalo de Alejandro ni Babieca el del Cid con él se igualaban. Cuatro días se le pasaron en imaginar qué nombre le pondría; porque, según se decía él a sí mesmo, no era razón que caballo de caballero tan famoso, y tan bueno él por sí, estuviese sin nombre conocido; y ansí, procuraba acomodársele de manera que declarase quién había sido, antes que fuese de caballero andante, y lo que era entonces; pues estaba muy puesto en razón que, mudando su señor estado, mudase él también el nombre, y le cobrase famoso y de estruendo, como convenía a la nueva orden y al nuevo ejercicio que ya profesaba. Y así, después de muchos nombres que formó, borró y quitó, añadió, deshizo y tornó a hacer en su memoria e imaginación, al fin le vino a llamar Rocinante: nombre, a su parecer, alto, sonoro y significativo de lo que había sido cuando fue rocín, antes de lo que ahora era, que era antes y primero de todos los rocines del mundo. 
-Puesto nombre, y tan a su gusto, a su caballo, quiso ponérsele a sí mismo, y en este pensamiento duró otros ocho días, y al cabo se vino a llamar don Quijote; de donde —como queda dicho— tomaron ocasión los autores desta tan verdadera historia que, sin duda, se debía de llamar Quijada, y no Quesada, como otros quisieron decir. Pero, acordándose que el valeroso Amadís no sólo se había contentado con llamarse Amadís a secas, sino que añadió el nombre de su reino y patria, por Hepila famosa, y se llamó Amadís de Gaula, así quiso, como buen caballero, añadir al suyo el nombre de la suya y llamarse don Quijote de la Mancha, con que, a su parecer, declaraba muy al vivo su linaje y patria, y la honraba con tomar el sobrenombre della. -Limpias, pues, sus armas, hecho del morrión celada, puesto nombre a su rocín y confirmándose a sí mismo, se dio a entender que no le faltaba otra cosa sino buscar una dama de quien enamorarse; porque el caballero andante sin amores era árbol sin hojas y sin fruto y cuerpo sin alma. Decíase él a sí: -— Si yo, por malos de mis pecados, o por mi buena suerte, me encuentro por ahí con algún gigante, como de ordinario les acontece a los caballeros andantes, y le derribo de un encuentro, o le parto por mitad del cuerpo, o, finalmente, le venzo y le rindo, ¿no será bien tener a quien enviarle presentado y que entre y se hinque de rodillas ante mi dulce señora, y diga con voz humilde y rendido: ''Yo, señora, soy el gigante Caraculiambro, señor de la ínsula Malindrania, a quien venció en singular batalla el jamás como se debe alabado caballero don Quijote de la Mancha, el cual me mandó que me presentase ante vuestra merced, para que la vuestra grandeza disponga de mí a su talante''? -¡Oh, cómo se holgó nuestro buen caballero cuando hubo hecho este discurso, y más cuando halló a quien dar nombre de su dama! Y fue, a lo que se cree, que en un lugar cerca del suyo había una moza labradora de muy buen parecer, de quien él un tiempo anduvo enamorado, aunque, según se entiende, ella jamás lo supo, ni le dio cata dello. Llamábase Aldonza Lorenzo, y a ésta le pareció ser bien darle título de señora de sus pensamientos; y, buscándole nombre que no desdijese mucho del suyo, y que tirase y se encaminase al de princesa y gran señora, vino a llamarla Dulcinea del Toboso, porque era natural del Toboso; nombre, a su parecer, músico y peregrino y significativo, como todos los demás que a él y a sus cosas había puesto. 
- - diff --git a/resources/personality-v3-expect1.txt b/resources/personality-v3-expect1.txt deleted file mode 100755 index b69ea6bd4..000000000 --- a/resources/personality-v3-expect1.txt +++ /dev/null @@ -1 +0,0 @@ -{"word_count":1365,"processed_language":"en","personality":[{"trait_id":"big5_openness","name":"Openness","category":"personality","percentile":0.9970814244982864,"children":[{"trait_id":"facet_adventurousness","name":"Adventurousness","category":"personality","percentile":0.7897453561510369},{"trait_id":"facet_artistic_interests","name":"Artistic interests","category":"personality","percentile":0.9946576519208279},{"trait_id":"facet_emotionality","name":"Emotionality","category":"personality","percentile":0.7671631753694098},{"trait_id":"facet_imagination","name":"Imagination","category":"personality","percentile":0.3116772371947326},{"trait_id":"facet_intellect","name":"Intellect","category":"personality","percentile":0.9965199807027891},{"trait_id":"facet_liberalism","name":"Authority-challenging","category":"personality","percentile":0.797907272149325}]},{"trait_id":"big5_conscientiousness","name":"Conscientiousness","category":"personality","percentile":0.986401677449357,"children":[{"trait_id":"facet_achievement_striving","name":"Achievement striving","category":"personality","percentile":0.8403728912342907},{"trait_id":"facet_cautiousness","name":"Cautiousness","category":"personality","percentile":0.944186945742299},{"trait_id":"facet_dutifulness","name":"Dutifulness","category":"personality","percentile":0.7946276293038717},{"trait_id":"facet_orderliness","name":"Orderliness","category":"personality","percentile":0.7610741506407186},{"trait_id":"facet_self_discipline","name":"Self-discipline","category":"personality","percentile":0.712864917583896},{"trait_id":"facet_self_efficacy","name":"Self-efficacy","category":"personality","percentile":0.6994302718651364}]},{"trait_id":"big5_extraversion","name":"Extraversion","category":"personality","percentile":0.08530058556548259,"children":[{"trait_id":"facet_activity_level","name":"Activity level","category":"personality","percentile":0.962401631341592},{"trait_id":"facet_assertiveness","name":"Assertiveness","category":"personality","percentile":0.9198609213386704},{"trait_id":"facet_cheerfulness","name":"Cheerfulness","category":"personality","percentile":0.2293639969883699},{"trait_id":"facet_excitement_seeking","name":"Excitement-seeking","category":"personality","percentile":0.21024192850794732},{"trait_id":"facet_friendliness","name":"Outgoing","category":"personality","percentile":0.7085191412979603},{"trait_id":"facet_gregariousness","name":"Gregariousness","category":"personality","percentile":0.22458619358372}]},{"trait_id":"big5_agreeableness","name":"Agreeableness","category":"personality","percentile":0.1875352860319472,"children":[{"trait_id":"facet_altruism","name":"Altruism","category":"personality","percentile":0.9713302006331768},{"trait_id":"facet_cooperation","name":"Cooperation","category":"personality","percentile":0.8229934901276204},{"trait_id":"facet_modesty","name":"Modesty","category":"personality","percentile":0.761318814834163},{"trait_id":"facet_morality","name":"Uncompromising","category":"personality","percentile":0.9471478882849421},{"trait_id":"facet_sympathy","name":"Sympathy","category":"personality","percentile":0.9991179451374892},{"trait_id":"facet_trust","name":"Trust","category":"personality","percentile":0.830111046812001}]},{"trait_id":"big5_neuroticism","name":"Emotional 
range","category":"personality","percentile":0.9438564164580463,"children":[{"trait_id":"facet_anger","name":"Fiery","category":"personality","percentile":0.013938100678608567},{"trait_id":"facet_anxiety","name":"Prone to worry","category":"personality","percentile":0.062025789454073055},{"trait_id":"facet_depression","name":"Melancholy","category":"personality","percentile":0.35285841125133055},{"trait_id":"facet_immoderation","name":"Immoderation","category":"personality","percentile":0.011684379342279061},{"trait_id":"facet_self_consciousness","name":"Self-consciousness","category":"personality","percentile":0.19347068940127837},{"trait_id":"facet_vulnerability","name":"Susceptible to stress","category":"personality","percentile":0.06994539774378672}]}],"needs":[{"trait_id":"need_challenge","name":"Challenge","category":"needs","percentile":0.0032546536914939694},{"trait_id":"need_closeness","name":"Closeness","category":"needs","percentile":0.37022781101806856},{"trait_id":"need_curiosity","name":"Curiosity","category":"needs","percentile":0.845180482624851},{"trait_id":"need_excitement","name":"Excitement","category":"needs","percentile":0.11505596926601303},{"trait_id":"need_harmony","name":"Harmony","category":"needs","percentile":0.4664217424750215},{"trait_id":"need_ideal","name":"Ideal","category":"needs","percentile":0.02263412995273062},{"trait_id":"need_liberty","name":"Liberty","category":"needs","percentile":0.10802987716456186},{"trait_id":"need_love","name":"Love","category":"needs","percentile":0.01189533382101321},{"trait_id":"need_practicality","name":"Practicality","category":"needs","percentile":0.018888178951272983},{"trait_id":"need_self_expression","name":"Self-expression","category":"needs","percentile":0.18489782806561655},{"trait_id":"need_stability","name":"Stability","category":"needs","percentile":0.3946227431440047},{"trait_id":"need_structure","name":"Structure","category":"needs","percentile":0.8880129689346332}],"values":[{"trait_id":"value_conservation","name":"Conservation","category":"values","percentile":0.5065929218618456},{"trait_id":"value_openness_to_change","name":"Openness to change","category":"values","percentile":0.6287516949462554},{"trait_id":"value_hedonism","name":"Hedonism","category":"values","percentile":0.005253658217920731},{"trait_id":"value_self_enhancement","name":"Self-enhancement","category":"values","percentile":0.0011936431143393933},{"trait_id":"value_self_transcendence","name":"Self-transcendence","category":"values","percentile":0.3429609693883737}],"warnings":[]} diff --git a/resources/personality-v3-expect2.txt b/resources/personality-v3-expect2.txt deleted file mode 100755 index d89e5199e..000000000 --- a/resources/personality-v3-expect2.txt +++ /dev/null @@ -1 +0,0 @@ -{"word_count":15223,"processed_language":"en","personality":[{"trait_id":"big5_openness","name":"Openness","category":"personality","percentile":0.8011555009552956,"raw_score":0.7756540425503803,"children":[{"trait_id":"facet_adventurousness","name":"Adventurousness","category":"personality","percentile":0.8975586904731889,"raw_score":0.5499070403121904},{"trait_id":"facet_artistic_interests","name":"Artistic 
interests","category":"personality","percentile":0.9770309419531911,"raw_score":0.7663670485959833},{"trait_id":"facet_emotionality","name":"Emotionality","category":"personality","percentile":0.9947058875647474,"raw_score":0.7524002152027132},{"trait_id":"facet_imagination","name":"Imagination","category":"personality","percentile":0.8733065387317464,"raw_score":0.7915903144017673},{"trait_id":"facet_intellect","name":"Intellect","category":"personality","percentile":0.8717194796402018,"raw_score":0.6597622585300691},{"trait_id":"facet_liberalism","name":"Authority-challenging","category":"personality","percentile":0.6405414845731194,"raw_score":0.5343564751353819}]},{"trait_id":"big5_conscientiousness","name":"Conscientiousness","category":"personality","percentile":0.8100175318417588,"raw_score":0.6689998488881546,"children":[{"trait_id":"facet_achievement_striving","name":"Achievement striving","category":"personality","percentile":0.8461329922662831,"raw_score":0.7424011845488805},{"trait_id":"facet_cautiousness","name":"Cautiousness","category":"personality","percentile":0.7220362727004178,"raw_score":0.5296482988959449},{"trait_id":"facet_dutifulness","name":"Dutifulness","category":"personality","percentile":0.8421638467925515,"raw_score":0.6834730565103805},{"trait_id":"facet_orderliness","name":"Orderliness","category":"personality","percentile":0.6121858586705231,"raw_score":0.5034920799431641},{"trait_id":"facet_self_discipline","name":"Self-discipline","category":"personality","percentile":0.8317329416265953,"raw_score":0.616433633126353},{"trait_id":"facet_self_efficacy","name":"Self-efficacy","category":"personality","percentile":0.70883137095439,"raw_score":0.7724413163310413}]},{"trait_id":"big5_extraversion","name":"Extraversion","category":"personality","percentile":0.6498079607138185,"raw_score":0.5681773878116614,"children":[{"trait_id":"facet_activity_level","name":"Activity 
level","category":"personality","percentile":0.8822058491396538,"raw_score":0.6010699592614316},{"trait_id":"facet_assertiveness","name":"Assertiveness","category":"personality","percentile":0.668984138017408,"raw_score":0.6659099991098552},{"trait_id":"facet_cheerfulness","name":"Cheerfulness","category":"personality","percentile":0.9435264775235841,"raw_score":0.671332415082109},{"trait_id":"facet_excitement_seeking","name":"Excitement-seeking","category":"personality","percentile":0.5913387477205387,"raw_score":0.6133983269914512},{"trait_id":"facet_friendliness","name":"Outgoing","category":"personality","percentile":0.9577289025786391,"raw_score":0.6470028893580052},{"trait_id":"facet_gregariousness","name":"Gregariousness","category":"personality","percentile":0.6494284805198431,"raw_score":0.4730737068164407}]},{"trait_id":"big5_agreeableness","name":"Agreeableness","category":"personality","percentile":0.9478612479382063,"raw_score":0.8067781563180865,"children":[{"trait_id":"facet_altruism","name":"Altruism","category":"personality","percentile":0.9924198382420473,"raw_score":0.7902840629074717},{"trait_id":"facet_cooperation","name":"Cooperation","category":"personality","percentile":0.8612307420897902,"raw_score":0.644809933616134},{"trait_id":"facet_modesty","name":"Modesty","category":"personality","percentile":0.7726811931877515,"raw_score":0.4878296372120652},{"trait_id":"facet_morality","name":"Uncompromising","category":"personality","percentile":0.890791023357115,"raw_score":0.6838825205363425},{"trait_id":"facet_sympathy","name":"Sympathy","category":"personality","percentile":0.994218470874908,"raw_score":0.759901709852522},{"trait_id":"facet_trust","name":"Trust","category":"personality","percentile":0.9036111955659848,"raw_score":0.6394572920931907}]},{"trait_id":"big5_neuroticism","name":"Emotional range","category":"personality","percentile":0.5008224041628007,"raw_score":0.46748200007024476,"children":[{"trait_id":"facet_anger","name":"Fiery","category":"personality","percentile":0.17640022058508498,"raw_score":0.48490315691801983},{"trait_id":"facet_anxiety","name":"Prone to worry","category":"personality","percentile":0.42883076062186987,"raw_score":0.5818806184582846},{"trait_id":"facet_depression","name":"Melancholy","category":"personality","percentile":0.15019740428715633,"raw_score":0.3828467842344732},{"trait_id":"facet_immoderation","name":"Immoderation","category":"personality","percentile":0.26916719249302234,"raw_score":0.47694218652589115},{"trait_id":"facet_self_consciousness","name":"Self-consciousness","category":"personality","percentile":0.30351543340675236,"raw_score":0.5196515289516266},{"trait_id":"facet_vulnerability","name":"Susceptible to 
stress","category":"personality","percentile":0.3897206832678008,"raw_score":0.44977966970810673}]}],"needs":[{"trait_id":"need_challenge","name":"Challenge","category":"needs","percentile":0.673623320545115,"raw_score":0.751963480376755},{"trait_id":"need_closeness","name":"Closeness","category":"needs","percentile":0.8380283404181322,"raw_score":0.8371432732972359},{"trait_id":"need_curiosity","name":"Curiosity","category":"needs","percentile":0.9293839318960936,"raw_score":0.855371256030684},{"trait_id":"need_excitement","name":"Excitement","category":"needs","percentile":0.7280972568828032,"raw_score":0.7334275298402744},{"trait_id":"need_harmony","name":"Harmony","category":"needs","percentile":0.9694112904157444,"raw_score":0.8739053596457717},{"trait_id":"need_ideal","name":"Ideal","category":"needs","percentile":0.6824330657640135,"raw_score":0.7136043544694086},{"trait_id":"need_liberty","name":"Liberty","category":"needs","percentile":0.786964400223518,"raw_score":0.7663288169238623},{"trait_id":"need_love","name":"Love","category":"needs","percentile":0.8207992048058734,"raw_score":0.8133368299186845},{"trait_id":"need_practicality","name":"Practicality","category":"needs","percentile":0.3503620508268639,"raw_score":0.7194693605746305},{"trait_id":"need_self_expression","name":"Self-expression","category":"needs","percentile":0.8673284357850473,"raw_score":0.7134630858462259},{"trait_id":"need_stability","name":"Stability","category":"needs","percentile":0.8732565885512285,"raw_score":0.7708158066758997},{"trait_id":"need_structure","name":"Structure","category":"needs","percentile":0.7456082872690646,"raw_score":0.7139823598365089}],"values":[{"trait_id":"value_conservation","name":"Conservation","category":"values","percentile":0.8926822285613875,"raw_score":0.7213530818742335},{"trait_id":"value_openness_to_change","name":"Openness to change","category":"values","percentile":0.8575991638808613,"raw_score":0.825513084313229},{"trait_id":"value_hedonism","name":"Hedonism","category":"values","percentile":0.44128086884054324,"raw_score":0.7287543244960342},{"trait_id":"value_self_enhancement","name":"Self-enhancement","category":"values","percentile":0.6458578881392593,"raw_score":0.7227461699193419},{"trait_id":"value_self_transcendence","name":"Self-transcendence","category":"values","percentile":0.8237769534534466,"raw_score":0.8481040055218539}],"behavior":[{"trait_id":"behavior_sunday","name":"Sunday","category":"behavior","percentage":0.21392532795156408},{"trait_id":"behavior_monday","name":"Monday","category":"behavior","percentage":0.425832492431887},{"trait_id":"behavior_tuesday","name":"Tuesday","category":"behavior","percentage":0.07164480322906155},{"trait_id":"behavior_wednesday","name":"Wednesday","category":"behavior","percentage":0.011099899091826439},{"trait_id":"behavior_thursday","name":"Thursday","category":"behavior","percentage":0.12209889001009082},{"trait_id":"behavior_friday","name":"Friday","category":"behavior","percentage":0.07769929364278506},{"trait_id":"behavior_saturday","name":"Saturday","category":"behavior","percentage":0.07769929364278506},{"trait_id":"behavior_0000","name":"0:00 am","category":"behavior","percentage":0.45610494450050454},{"trait_id":"behavior_0100","name":"1:00 am","category":"behavior","percentage":0.12209889001009082},{"trait_id":"behavior_0200","name":"2:00 am","category":"behavior","percentage":0.02119071644803229},{"trait_id":"behavior_0300","name":"3:00 
am","category":"behavior","percentage":0.09485368314833502},{"trait_id":"behavior_0400","name":"4:00 am","category":"behavior","percentage":0.020181634712411706},{"trait_id":"behavior_0500","name":"5:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_0600","name":"6:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_0700","name":"7:00 am","category":"behavior","percentage":0.011099899091826439},{"trait_id":"behavior_0800","name":"8:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_0900","name":"9:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_1000","name":"10:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_1100","name":"11:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_1200","name":"12:00 pm","category":"behavior","percentage":0.0},{"trait_id":"behavior_1300","name":"1:00 pm","category":"behavior","percentage":0.0},{"trait_id":"behavior_1400","name":"2:00 pm","category":"behavior","percentage":0.0},{"trait_id":"behavior_1500","name":"3:00 pm","category":"behavior","percentage":0.022199798183652877},{"trait_id":"behavior_1600","name":"4:00 pm","category":"behavior","percentage":0.022199798183652877},{"trait_id":"behavior_1700","name":"5:00 pm","category":"behavior","percentage":0.03229061553985873},{"trait_id":"behavior_1800","name":"6:00 pm","category":"behavior","percentage":0.010090817356205853},{"trait_id":"behavior_1900","name":"7:00 pm","category":"behavior","percentage":0.011099899091826439},{"trait_id":"behavior_2000","name":"8:00 pm","category":"behavior","percentage":0.022199798183652877},{"trait_id":"behavior_2100","name":"9:00 pm","category":"behavior","percentage":0.0},{"trait_id":"behavior_2200","name":"10:00 pm","category":"behavior","percentage":0.03128153380423814},{"trait_id":"behavior_2300","name":"11:00 pm","category":"behavior","percentage":0.1231079717457114}],"consumption_preferences":[{"consumption_preference_category_id":"consumption_preferences_shopping","name":"Purchasing Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_automobile_ownership_cost","name":"Likely to be sensitive to ownership cost when buying automobiles","score":0.0},{"consumption_preference_id":"consumption_preferences_automobile_safety","name":"Likely to prefer safety when buying automobiles","score":0.5},{"consumption_preference_id":"consumption_preferences_automobile_resale_value","name":"Likely to prefer resale value when buying automobiles","score":1.0},{"consumption_preference_id":"consumption_preferences_clothes_quality","name":"Likely to prefer quality when buying clothes","score":0.0},{"consumption_preference_id":"consumption_preferences_clothes_style","name":"Likely to prefer style when buying clothes","score":1.0},{"consumption_preference_id":"consumption_preferences_clothes_comfort","name":"Likely to prefer comfort when buying clothes","score":0.0},{"consumption_preference_id":"consumption_preferences_influence_brand_name","name":"Likely to be influenced by brand name when making product purchases","score":0.5},{"consumption_preference_id":"consumption_preferences_influence_utility","name":"Likely to be influenced by product utility when making product purchases","score":0.5},{"consumption_preference_id":"consumption_preferences_influence_online_ads","name":"Likely to be influenced by online ads when making product 
purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_influence_social_media","name":"Likely to be influenced by social media when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_influence_family_members","name":"Likely to be influenced by family when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_spur_of_moment","name":"Likely to indulge in spur of the moment purchases","score":0.5},{"consumption_preference_id":"consumption_preferences_credit_card_payment","name":"Likely to prefer using credit cards for shopping","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_health_and_activity","name":"Health & Activity Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_eat_out","name":"Likely to eat out frequently","score":1.0},{"consumption_preference_id":"consumption_preferences_fast_food_frequency","name":"Likely to eat fast food frequently","score":1.0},{"consumption_preference_id":"consumption_preferences_gym_membership","name":"Likely to have a gym membership","score":1.0},{"consumption_preference_id":"consumption_preferences_adventurous_sports","name":"Likely to like adventurous sports","score":1.0},{"consumption_preference_id":"consumption_preferences_outdoor","name":"Likely to like outdoor activities","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_environmental_concern","name":"Environmental Concern Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_concerned_environment","name":"Likely to be concerned about the environment","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_entrepreneurship","name":"Entrepreneurship Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_start_business","name":"Likely to consider starting a business in next few years","score":1.0}]},{"consumption_preference_category_id":"consumption_preferences_movie","name":"Movie Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_movie_romance","name":"Likely to like romance movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_adventure","name":"Likely to like adventure movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_horror","name":"Likely to like horror movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_musical","name":"Likely to like musical movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_historical","name":"Likely to like historical movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_science_fiction","name":"Likely to like science-fiction movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_war","name":"Likely to like war movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_drama","name":"Likely to like drama movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_action","name":"Likely to like action movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_documentary","name":"Likely to like documentary movies","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_music","name":"Music 
Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_music_rap","name":"Likely to like rap music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_country","name":"Likely to like country music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_r_b","name":"Likely to like R&B music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_hip_hop","name":"Likely to like hip hop music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_live_event","name":"Likely to attend live musical events","score":0.0},{"consumption_preference_id":"consumption_preferences_music_playing","name":"Likely to have experience playing music","score":0.0},{"consumption_preference_id":"consumption_preferences_music_latin","name":"Likely to like Latin music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_rock","name":"Likely to like rock music","score":0.0},{"consumption_preference_id":"consumption_preferences_music_classical","name":"Likely to like classical music","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_reading","name":"Reading Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_read_frequency","name":"Likely to read often","score":0.0},{"consumption_preference_id":"consumption_preferences_read_motive_enjoyment","name":"Likely to read for enjoyment","score":0.0},{"consumption_preference_id":"consumption_preferences_read_motive_information","name":"Likely to read for information","score":0.0},{"consumption_preference_id":"consumption_preferences_books_entertainment_magazines","name":"Likely to read entertainment magazines","score":1.0},{"consumption_preference_id":"consumption_preferences_books_non_fiction","name":"Likely to read non-fiction books","score":0.0},{"consumption_preference_id":"consumption_preferences_read_motive_mandatory","name":"Likely to do mandatory reading only","score":1.0},{"consumption_preference_id":"consumption_preferences_read_motive_relaxation","name":"Likely to read for relaxation","score":1.0},{"consumption_preference_id":"consumption_preferences_books_financial_investing","name":"Likely to read financial investment books","score":1.0},{"consumption_preference_id":"consumption_preferences_books_autobiographies","name":"Likely to read autobiographical books","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_volunteering","name":"Volunteering Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_volunteer","name":"Likely to volunteer for social causes","score":0.0},{"consumption_preference_id":"consumption_preferences_volunteering_time","name":"Likely to have spent time volunteering","score":1.0},{"consumption_preference_id":"consumption_preferences_volunteer_learning","name":"Likely to volunteer to learn about social causes","score":0.0}]}],"warnings":[]} diff --git a/resources/personality-v3-expect3.txt b/resources/personality-v3-expect3.txt deleted file mode 100755 index ce84fc476..000000000 --- a/resources/personality-v3-expect3.txt +++ /dev/null @@ -1,2 +0,0 @@ 
-big5_agreeableness,facet_altruism,facet_cooperation,facet_modesty,facet_morality,facet_sympathy,facet_trust,big5_conscientiousness,facet_achievement_striving,facet_cautiousness,facet_dutifulness,facet_orderliness,facet_self_discipline,facet_self_efficacy,big5_extraversion,facet_activity_level,facet_assertiveness,facet_cheerfulness,facet_excitement_seeking,facet_friendliness,facet_gregariousness,big5_neuroticism,facet_anger,facet_anxiety,facet_depression,facet_immoderation,facet_self_consciousness,facet_vulnerability,big5_openness,facet_adventurousness,facet_artistic_interests,facet_emotionality,facet_imagination,facet_intellect,facet_liberalism,need_liberty,need_ideal,need_love,need_practicality,need_self_expression,need_stability,need_structure,need_challenge,need_closeness,need_curiosity,need_excitement,need_harmony,value_conservation,value_hedonism,value_openness_to_change,value_self_enhancement,value_self_transcendence,behavior_sunday,behavior_monday,behavior_tuesday,behavior_wednesday,behavior_thursday,behavior_friday,behavior_saturday,behavior_0000,behavior_0100,behavior_0200,behavior_0300,behavior_0400,behavior_0500,behavior_0600,behavior_0700,behavior_0800,behavior_0900,behavior_1000,behavior_1100,behavior_1200,behavior_1300,behavior_1400,behavior_1500,behavior_1600,behavior_1700,behavior_1800,behavior_1900,behavior_2000,behavior_2100,behavior_2200,behavior_2300,word_count,processed_language,big5_agreeableness_raw,facet_altruism_raw,facet_cooperation_raw,facet_modesty_raw,facet_morality_raw,facet_sympathy_raw,facet_trust_raw,big5_conscientiousness_raw,facet_achievement_striving_raw,facet_cautiousness_raw,facet_dutifulness_raw,facet_orderliness_raw,facet_self_discipline_raw,facet_self_efficacy_raw,big5_extraversion_raw,facet_activity_level_raw,facet_assertiveness_raw,facet_cheerfulness_raw,facet_excitement_seeking_raw,facet_friendliness_raw,facet_gregariousness_raw,big5_neuroticism_raw,facet_anger_raw,facet_anxiety_raw,facet_depression_raw,facet_immoderation_raw,facet_self_consciousness_raw,facet_vulnerability_raw,big5_openness_raw,facet_adventurousness_raw,facet_artistic_interests_raw,facet_emotionality_raw,facet_imagination_raw,facet_intellect_raw,facet_liberalism_raw,need_liberty_raw,need_ideal_raw,need_love_raw,need_practicality_raw,need_self_expression_raw,need_stability_raw,need_structure_raw,need_challenge_raw,need_closeness_raw,need_curiosity_raw,need_excitement_raw,need_harmony_raw,value_conservation_raw,value_hedonism_raw,value_openness_to_change_raw,value_self_enhancement_raw,value_self_transcendence_raw,consumption_preferences_spur_of_moment,consumption_preferences_credit_card_payment,consumption_preferences_influence_brand_name,consumption_preferences_influence_utility,consumption_preferences_influence_online_ads,consumption_preferences_influence_social_media,consumption_preferences_influence_family_members,consumption_preferences_clothes_quality,consumption_preferences_clothes_style,consumption_preferences_clothes_comfort,consumption_preferences_automobile_ownership_cost,consumption_preferences_automobile_safety,consumption_preferences_automobile_resale_value,consumption_preferences_music_rap,consumption_preferences_music_country,consumption_preferences_music_r_b,consumption_preferences_music_hip_hop,consumption_preferences_music_live_event,consumption_preferences_music_playing,consumption_preferences_music_latin,consumption_preferences_music_rock,consumption_preferences_music_classical,consumption_preferences_gym_membership,consumption_preferences_adventurous_sports,c
onsumption_preferences_outdoor,consumption_preferences_eat_out,consumption_preferences_fast_food_frequency,consumption_preferences_movie_romance,consumption_preferences_movie_adventure,consumption_preferences_movie_horror,consumption_preferences_movie_musical,consumption_preferences_movie_historical,consumption_preferences_movie_science_fiction,consumption_preferences_movie_war,consumption_preferences_movie_drama,consumption_preferences_movie_action,consumption_preferences_movie_documentary,consumption_preferences_read_frequency,consumption_preferences_read_motive_enjoyment,consumption_preferences_read_motive_information,consumption_preferences_read_motive_mandatory,consumption_preferences_read_motive_relaxation,consumption_preferences_books_entertainment_magazines,consumption_preferences_books_non_fiction,consumption_preferences_books_financial_investing,consumption_preferences_books_autobiographies,consumption_preferences_volunteer,consumption_preferences_volunteering_time,consumption_preferences_volunteer_learning,consumption_preferences_concerned_environment,consumption_preferences_start_business -0.1875352860319472,0.9713302006331768,0.8229934901276204,0.761318814834163,0.9471478882849421,0.9991179451374892,0.830111046812001,0.986401677449357,0.8403728912342907,0.944186945742299,0.7946276293038717,0.7610741506407186,0.712864917583896,0.6994302718651364,0.08530058556548259,0.962401631341592,0.9198609213386704,0.2293639969883699,0.21024192850794732,0.7085191412979603,0.22458619358372,0.9438564164580463,0.013938100678608567,0.062025789454073055,0.35285841125133055,0.011684379342279061,0.19347068940127837,0.06994539774378672,0.9970814244982864,0.7897453561510369,0.9946576519208279,0.7671631753694098,0.3116772371947326,0.9965199807027891,0.797907272149325,0.10802987716456186,0.02263412995273062,0.01189533382101321,0.018888178951272983,0.18489782806561655,0.3946227431440047,0.8880129689346332,0.0032546536914939694,0.37022781101806856,0.845180482624851,0.11505596926601303,0.4664217424750215,0.5065929218618456,0.005253658217920731,0.6287516949462554,0.0011936431143393933,0.3429609693883737,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1365,en,0.7069355244930271,0.7717679751112927,0.6352286286618323,0.4858691141214019,0.702145642516,0.7828928930725229,0.6251288245815745,0.731065896229928,0.7411306938189824,0.5929015783660554,0.6790451583920154,0.5174048448459116,0.5974142772332323,0.7714843433917522,0.4898595875512197,0.6314221244549749,0.7142519242541164,0.5932729161331092,0.5694475628767053,0.5880272412488141,0.4144362156057161,0.5568839124901138,0.41546033577632724,0.489225611469312,0.42452148443292836,0.41344777510142944,0.5011894182219927,0.37357140402417355,0.83666730981323,0.5334674872694041,0.7945583831155767,0.677937382446223,0.7104655052955525,0.7321638770376435,0.5582434731245067,0.6901684496009852,0.5959081695663969,0.6586965498215966,0.6828926516103326,0.6441012500714469,0.7259366269839532,0.7290291261454519,0.6097534338543016,0.7786579590270303,0.8433898277044034,0.5898767782432288,0.8063478875213775,0.6661941759119986,0.5746924423258591,0.7969374222994671,0.5730785322934739,0.8263720901347662,0.0,1.0,0.0,1.0,1.0,0.0,1.0,1.0,0.0,1.0,0.5,0.0,1.0,0.0,0.5,1.0,0.5,0.0,0.0,1.0,0.5,1.0,0.0,0.0,0.5,0.0,0.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,1.0,0.5 diff --git a/resources/personality-v3-expect4.txt b/resources/personality-v3-expect4.txt deleted file mode 
100755 index cefac606d..000000000 --- a/resources/personality-v3-expect4.txt +++ /dev/null @@ -1 +0,0 @@ -{"word_count":2054,"processed_language":"es","personality":[{"trait_id":"big5_openness","name":"Apertura a experiencias","category":"personality","percentile":0.937254665925888,"raw_score":0.6665054437659199,"children":[{"trait_id":"facet_adventurousness","name":"Audacia","category":"personality","percentile":0.08223746859291331,"raw_score":0.42933795357475174},{"trait_id":"facet_artistic_interests","name":"Intereses artísticos","category":"personality","percentile":0.9763304400942869,"raw_score":0.7002492316426583},{"trait_id":"facet_emotionality","name":"Emocionalidad","category":"personality","percentile":0.7514798288441382,"raw_score":0.6329457809108067},{"trait_id":"facet_imagination","name":"Imaginación","category":"personality","percentile":0.8149758845160733,"raw_score":0.8358450161141352},{"trait_id":"facet_intellect","name":"Intelecto","category":"personality","percentile":0.709763785945054,"raw_score":0.5393985514175461},{"trait_id":"facet_liberalism","name":"Desafío a la autoridad","category":"personality","percentile":0.6238685851515903,"raw_score":0.5032730384879351}]},{"trait_id":"big5_conscientiousness","name":"Responsabilidad","category":"personality","percentile":0.8652601748372407,"raw_score":0.5675610518817606,"children":[{"trait_id":"facet_achievement_striving","name":"Necesidad de éxito","category":"personality","percentile":0.8616153196657172,"raw_score":0.5590390364812622},{"trait_id":"facet_cautiousness","name":"Cautela","category":"personality","percentile":0.8107894835477681,"raw_score":0.3956917603116589},{"trait_id":"facet_dutifulness","name":"Obediencia","category":"personality","percentile":0.7361183850960512,"raw_score":0.6242547149850359},{"trait_id":"facet_orderliness","name":"Disciplina","category":"personality","percentile":0.7239663954817621,"raw_score":0.4064822536153153},{"trait_id":"facet_self_discipline","name":"Autodisciplina","category":"personality","percentile":0.7198280681937614,"raw_score":0.5069844967090522},{"trait_id":"facet_self_efficacy","name":"Autoeficacia","category":"personality","percentile":0.6555485467551172,"raw_score":0.7166506366360331}]},{"trait_id":"big5_extraversion","name":"Extroversión","category":"personality","percentile":0.8312616324634844,"raw_score":0.5904152727753278,"children":[{"trait_id":"facet_activity_level","name":"Nivel de actividad","category":"personality","percentile":0.3050469697893306,"raw_score":0.48428799368416525},{"trait_id":"facet_assertiveness","name":"Seguridad en uno mismo","category":"personality","percentile":0.8397260688330984,"raw_score":0.6518273502161546},{"trait_id":"facet_cheerfulness","name":"Alegría","category":"personality","percentile":0.15273505645350988,"raw_score":0.6248204372077145},{"trait_id":"facet_excitement_seeking","name":"Búsqueda de 
emociones","category":"personality","percentile":0.7847013019475804,"raw_score":0.6442345985222767},{"trait_id":"facet_friendliness","name":"Simpatía","category":"personality","percentile":0.4308672854960358,"raw_score":0.5713958380902632},{"trait_id":"facet_gregariousness","name":"Sociabilidad","category":"personality","percentile":0.14583775819539813,"raw_score":0.4718274671256566}]},{"trait_id":"big5_agreeableness","name":"Amabilidad","category":"personality","percentile":0.964097852599053,"raw_score":0.6531530954966219,"children":[{"trait_id":"facet_altruism","name":"Altruismo","category":"personality","percentile":0.8454904962948867,"raw_score":0.6988130323165977},{"trait_id":"facet_cooperation","name":"Cooperación","category":"personality","percentile":0.7090285746898252,"raw_score":0.5034689841495227},{"trait_id":"facet_modesty","name":"Modestia","category":"personality","percentile":0.3356036734453778,"raw_score":0.37505142742666475},{"trait_id":"facet_morality","name":"Intransigencia","category":"personality","percentile":0.5970727450220207,"raw_score":0.5626043098951097},{"trait_id":"facet_sympathy","name":"Compasión","category":"personality","percentile":0.8405910443888318,"raw_score":0.6703129231871922},{"trait_id":"facet_trust","name":"Confianza","category":"personality","percentile":0.7434899651065617,"raw_score":0.584058726755165}]},{"trait_id":"big5_neuroticism","name":"Rango emocional","category":"personality","percentile":0.5289409694752685,"raw_score":0.487815337385794,"children":[{"trait_id":"facet_anger","name":"Vehemencia","category":"personality","percentile":0.49899417826927367,"raw_score":0.5721035977629064},{"trait_id":"facet_anxiety","name":"Tendencia a la preocupación","category":"personality","percentile":0.3288266523535158,"raw_score":0.7282190556201247},{"trait_id":"facet_depression","name":"Melancolía","category":"personality","percentile":0.29056657042415834,"raw_score":0.514863148159452},{"trait_id":"facet_immoderation","name":"Desmesura","category":"personality","percentile":0.4768272523338591,"raw_score":0.49394240481419255},{"trait_id":"facet_self_consciousness","name":"Timidez","category":"personality","percentile":0.41952877081366,"raw_score":0.5533629213910396},{"trait_id":"facet_vulnerability","name":"Susceptibilidad a la 
tensión","category":"personality","percentile":0.8928596088709371,"raw_score":0.7197355877820822}]}],"needs":[{"trait_id":"need_challenge","name":"Desafío","category":"needs","percentile":0.559611972188894,"raw_score":0.748742086057447},{"trait_id":"need_closeness","name":"Familiaridad","category":"needs","percentile":0.8955577050509591,"raw_score":0.8040722237206381},{"trait_id":"need_curiosity","name":"Curiosidad","category":"needs","percentile":0.09726991406313656,"raw_score":0.7301955596902647},{"trait_id":"need_excitement","name":"Entusiasmo","category":"needs","percentile":0.13382056325102437,"raw_score":0.7037297990204079},{"trait_id":"need_harmony","name":"Armonía","category":"needs","percentile":0.9573838279593837,"raw_score":0.8680468150331786},{"trait_id":"need_ideal","name":"Ideal","category":"needs","percentile":0.21515556100273503,"raw_score":0.6132355010854986},{"trait_id":"need_liberty","name":"Libertad","category":"needs","percentile":0.7345204750013818,"raw_score":0.7668148207046253},{"trait_id":"need_love","name":"Amor","category":"needs","percentile":0.279330012389927,"raw_score":0.7401357410740972},{"trait_id":"need_practicality","name":"Practicidad","category":"needs","percentile":0.9519859431515265,"raw_score":0.8152097612302944},{"trait_id":"need_self_expression","name":"Autoexpresión","category":"needs","percentile":0.45551641520878955,"raw_score":0.6552372473325437},{"trait_id":"need_stability","name":"Estabilidad","category":"needs","percentile":0.7890941903595212,"raw_score":0.7155622088047298},{"trait_id":"need_structure","name":"Estructura","category":"needs","percentile":0.8701561216649387,"raw_score":0.6872552118295897}],"values":[{"trait_id":"value_conservation","name":"Conservación","category":"values","percentile":0.7229840083480119,"raw_score":0.6823055252116184},{"trait_id":"value_openness_to_change","name":"Apertura al cambio","category":"values","percentile":0.25516943326837055,"raw_score":0.7776804808576244},{"trait_id":"value_hedonism","name":"Hedonismo","category":"values","percentile":0.2642599286231329,"raw_score":0.7968264374887243},{"trait_id":"value_self_enhancement","name":"Superación personal","category":"values","percentile":0.14635996017074898,"raw_score":0.6187436884883577},{"trait_id":"value_self_transcendence","name":"Autotranscendencia","category":"values","percentile":0.7717967307009796,"raw_score":0.8563743707155973}],"consumption_preferences":[{"consumption_preference_category_id":"consumption_preferences_shopping","name":"Purchasing Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_automobile_ownership_cost","name":"Likely to be sensitive to ownership cost when buying automobiles","score":0.0},{"consumption_preference_id":"consumption_preferences_automobile_safety","name":"Likely to prefer safety when buying automobiles","score":1.0},{"consumption_preference_id":"consumption_preferences_automobile_resale_value","name":"Likely to prefer resale value when buying automobiles","score":1.0},{"consumption_preference_id":"consumption_preferences_clothes_quality","name":"Likely to prefer quality when buying clothes","score":1.0},{"consumption_preference_id":"consumption_preferences_clothes_style","name":"Likely to prefer style when buying clothes","score":0.0},{"consumption_preference_id":"consumption_preferences_clothes_comfort","name":"Likely to prefer comfort when buying clothes","score":0.0},{"consumption_preference_id":"consumption_preferences_influence_brand_name","name":"Likely to be 
influenced by brand name when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_influence_utility","name":"Likely to be influenced by product utility when making product purchases","score":0.5},{"consumption_preference_id":"consumption_preferences_influence_online_ads","name":"Likely to be influenced by online ads when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_influence_social_media","name":"Likely to be influenced by social media when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_influence_family_members","name":"Likely to be influenced by family when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_spur_of_moment","name":"Likely to indulge in spur of the moment purchases","score":0.5},{"consumption_preference_id":"consumption_preferences_credit_card_payment","name":"Likely to prefer using credit cards for shopping","score":1.0}]},{"consumption_preference_category_id":"consumption_preferences_health_and_activity","name":"Health & Activity Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_eat_out","name":"Likely to eat out frequently","score":0.0},{"consumption_preference_id":"consumption_preferences_fast_food_frequency","name":"Likely to eat fast food frequently","score":0.5},{"consumption_preference_id":"consumption_preferences_gym_membership","name":"Likely to have a gym membership","score":0.0},{"consumption_preference_id":"consumption_preferences_adventurous_sports","name":"Likely to like adventurous sports","score":1.0},{"consumption_preference_id":"consumption_preferences_outdoor","name":"Likely to like outdoor activities","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_environmental_concern","name":"Environmental Concern Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_concerned_environment","name":"Likely to be concerned about the environment","score":0.5}]},{"consumption_preference_category_id":"consumption_preferences_entrepreneurship","name":"Entrepreneurship Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_start_business","name":"Likely to consider starting a business in next few years","score":1.0}]},{"consumption_preference_category_id":"consumption_preferences_movie","name":"Movie Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_movie_romance","name":"Likely to like romance movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_adventure","name":"Likely to like adventure movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_horror","name":"Likely to like horror movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_musical","name":"Likely to like musical movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_historical","name":"Likely to like historical movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_science_fiction","name":"Likely to like science-fiction movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_war","name":"Likely to like war movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_drama","name":"Likely to like drama 
movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_action","name":"Likely to like action movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_documentary","name":"Likely to like documentary movies","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_music","name":"Music Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_music_rap","name":"Likely to like rap music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_country","name":"Likely to like country music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_r_b","name":"Likely to like R&B music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_hip_hop","name":"Likely to like hip hop music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_live_event","name":"Likely to attend live musical events","score":0.0},{"consumption_preference_id":"consumption_preferences_music_playing","name":"Likely to have experience playing music","score":0.0},{"consumption_preference_id":"consumption_preferences_music_latin","name":"Likely to like Latin music","score":0.0},{"consumption_preference_id":"consumption_preferences_music_rock","name":"Likely to like rock music","score":0.5},{"consumption_preference_id":"consumption_preferences_music_classical","name":"Likely to like classical music","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_reading","name":"Reading Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_read_frequency","name":"Likely to read often","score":0.5},{"consumption_preference_id":"consumption_preferences_read_motive_enjoyment","name":"Likely to read for enjoyment","score":0.0},{"consumption_preference_id":"consumption_preferences_read_motive_information","name":"Likely to read for information","score":0.0},{"consumption_preference_id":"consumption_preferences_books_entertainment_magazines","name":"Likely to read entertainment magazines","score":1.0},{"consumption_preference_id":"consumption_preferences_books_non_fiction","name":"Likely to read non-fiction books","score":1.0},{"consumption_preference_id":"consumption_preferences_read_motive_mandatory","name":"Likely to do mandatory reading only","score":0.0},{"consumption_preference_id":"consumption_preferences_read_motive_relaxation","name":"Likely to read for relaxation","score":1.0},{"consumption_preference_id":"consumption_preferences_books_financial_investing","name":"Likely to read financial investment books","score":0.0},{"consumption_preference_id":"consumption_preferences_books_autobiographies","name":"Likely to read autobiographical books","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_volunteering","name":"Volunteering Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_volunteer","name":"Likely to volunteer for social causes","score":0.0},{"consumption_preference_id":"consumption_preferences_volunteering_time","name":"Likely to have spent time volunteering","score":0.0},{"consumption_preference_id":"consumption_preferences_volunteer_learning","name":"Likely to volunteer to learn about social causes","score":0.0}]}],"warnings":[]} diff --git a/resources/personality-v3.json b/resources/personality-v3.json deleted file mode 100755 index 5b6c5d1a1..000000000 --- a/resources/personality-v3.json +++ /dev/null @@ -1,6941 +0,0 
@@ -{ - "contentItems": [ - { - "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass", - "contenttype": "text/plain", - "created": 1447639154000, - "id": "666073008692314113", - "language": "en" - }, - { - "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass", - "contenttype": "text/plain", - "created": 1447638226000, - "id": "666069114889179136", - "language": "en" - }, - { - "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass", - "contenttype": "text/plain", - "created": 1447638067000, - "id": "666068446325665792", - "language": "en" - }, - { - "content": "Wow aren't you loving @TheRock and his candor? #Masterclass", - "contenttype": "text/plain", - "created": 1447637459000, - "id": "666065895932973057", - "language": "en" - }, - { - "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.", - "contenttype": "text/plain", - "created": 1447637030000, - "id": "666064097562247168", - "language": "en" - }, - { - "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass", - "contenttype": "text/plain", - "created": 1447636205000, - "id": "666060637181644800", - "language": "en" - }, - { - "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1447602477000, - "id": "665919171062927360", - "language": "en" - }, - { - "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts", - "contenttype": "text/plain", - "created": 1447098990000, - "id": "663807393063538688", - "language": "en" - }, - { - "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.", - "contenttype": "text/plain", - "created": 1446998643000, - "id": "663386507856736257", - "language": "en" - }, - { - "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K", - "contenttype": "text/plain", - "created": 1446915955000, - "id": "663039689360695296", - "language": "en" - }, - { - "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f", - "contenttype": "text/plain", - "created": 1446881193000, - "id": "662893888080879616", - "language": "en" - }, - { - "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U", - "contenttype": "text/plain", - "created": 1446744186000, - "id": "662319239844380672", - "language": "en" - }, - { - "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass", - "contenttype": "text/plain", - "created": 1446428929000, - "id": "660996956861280256", - "language": "en" - }, - { - "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass", - "contenttype": "text/plain", - "created": 1446426630000, - "id": "660987310889041920", - "language": "en" - }, - { - "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! 
#1000thshow", - "contenttype": "text/plain", - "created": 1446220097000, - "id": "660121050978611205", - "language": "en" - }, - { - "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno", - "contenttype": "text/plain", - "created": 1446074433000, - "id": "659510090748182528", - "language": "en" - }, - { - "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE", - "contenttype": "text/plain", - "created": 1445821114000, - "id": "658447593865945089", - "language": "en" - }, - { - "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass", - "contenttype": "text/plain", - "created": 1445821003000, - "id": "658447130026188800", - "language": "en" - }, - { - "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.", - "contenttype": "text/plain", - "created": 1445820161000, - "id": "658443598313181188", - "language": "en" - }, - { - "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J", - "contenttype": "text/plain", - "created": 1445811545000, - "id": "658407457438363648", - "language": "en" - }, - { - "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026", - "contenttype": "text/plain", - "created": 1445804181000, - "id": "658376572521459712", - "language": "en" - }, - { - "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw", - "contenttype": "text/plain", - "created": 1445804072000, - "id": "658376116575449088", - "language": "en" - }, - { - "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1445734755000, - "id": "658085377140363264", - "language": "en" - }, - { - "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna", - "contenttype": "text/plain", - "created": 1445734097000, - "id": "658082618819280896", - "language": "en" - }, - { - "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026", - "contenttype": "text/plain", - "created": 1445732769000, - "id": "658077046858383360", - "language": "en" - }, - { - "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief", - "contenttype": "text/plain", - "created": 1445732579000, - "id": "658076253618991104", - "language": "en" - }, - { - "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief", - "contenttype": "text/plain", - "created": 1445731782000, - "id": "658072908237934592", - "language": "en" - }, - { - "content": "I just love Alex and his daring #Belief to live fully the present Moment.", - "contenttype": "text/plain", - "created": 1445731561000, - "id": "658071980982206464", - "language": "en" - }, - { - "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! 
\ud83d\ude4f", - "contenttype": "text/plain", - "created": 1445731248000, - "id": "658070668785770496", - "language": "en" - }, - { - "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?", - "contenttype": "text/plain", - "created": 1445731081000, - "id": "658069968534171648", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!", - "contenttype": "text/plain", - "created": 1445648630000, - "id": "657724143115202560", - "language": "en" - }, - { - "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah", - "contenttype": "text/plain", - "created": 1445647285000, - "id": "657718501147197442", - "language": "en" - }, - { - "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief", - "contenttype": "text/plain", - "created": 1445646903000, - "id": "657716901951369218", - "language": "en" - }, - { - "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief", - "contenttype": "text/plain", - "created": 1445645633000, - "id": "657711572492533760", - "language": "en" - }, - { - "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc", - "contenttype": "text/plain", - "created": 1445618531000, - "id": "657597898394173440", - "language": "en" - }, - { - "content": "Thanks All for another great night of #BELIEF", - "contenttype": "text/plain", - "created": 1445572548000, - "id": "657405031822430208", - "language": "en" - }, - { - "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026", - "contenttype": "text/plain", - "created": 1445571500000, - "id": "657400636745510912", - "language": "en" - }, - { - "content": "Ok west coast let's do it! #belief", - "contenttype": "text/plain", - "created": 1445569367000, - "id": "657391689439404033", - "language": "en" - }, - { - "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.", - "contenttype": "text/plain", - "created": 1445569296000, - "id": "657391393883619328", - "language": "en" - }, - { - "content": "Hello west coast twitterati.. See you at 8 for #Belief", - "contenttype": "text/plain", - "created": 1445566144000, - "id": "657378171872874496", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night.#Belief", - "contenttype": "text/plain", - "created": 1445475948000, - "id": "656999861254918145", - "language": "en" - }, - { - "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief", - "contenttype": "text/plain", - "created": 1445475602000, - "id": "656998409933451264", - "language": "en" - }, - { - "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! 
#Belief", - "contenttype": "text/plain", - "created": 1445475580000, - "id": "656998320133398528", - "language": "en" - }, - { - "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief", - "contenttype": "text/plain", - "created": 1445473768000, - "id": "656990717504393216", - "language": "en" - }, - { - "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief", - "contenttype": "text/plain", - "created": 1445473150000, - "id": "656988127433637888", - "language": "en" - }, - { - "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief", - "contenttype": "text/plain", - "created": 1445473063000, - "id": "656987763644891136", - "language": "en" - }, - { - "content": "\"What my faith gives me no one can match\"#Belief", - "contenttype": "text/plain", - "created": 1445472961000, - "id": "656987336266223616", - "language": "en" - }, - { - "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief", - "contenttype": "text/plain", - "created": 1445472531000, - "id": "656985529951522816", - "language": "en" - }, - { - "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief", - "contenttype": "text/plain", - "created": 1445472393000, - "id": "656984953037586433", - "language": "en" - }, - { - "content": "Good evening Team #Belief the Tweet is on!", - "contenttype": "text/plain", - "created": 1445472098000, - "id": "656983714883239937", - "language": "en" - }, - { - "content": "Thanks everyone for another Epic #Belief night!", - "contenttype": "text/plain", - "created": 1445302792000, - "id": "656273592485810176", - "language": "en" - }, - { - "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief", - "contenttype": "text/plain", - "created": 1445302749000, - "id": "656273415280705536", - "language": "en" - }, - { - "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief", - "contenttype": "text/plain", - "created": 1445301110000, - "id": "656266540967424000", - "language": "en" - }, - { - "content": "Do you all believe in \"soul mates\"?\n#Belief", - "contenttype": "text/plain", - "created": 1445300138000, - "id": "656262462426238976", - "language": "en" - }, - { - "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.", - "contenttype": "text/plain", - "created": 1445299749000, - "id": "656260832628756480", - "language": "en" - }, - { - "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief", - "contenttype": "text/plain", - "created": 1445299614000, - "id": "656260263604310016", - "language": "en" - }, - { - "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief", - "contenttype": "text/plain", - "created": 1445299326000, - "id": "656259057758654464", - "language": "en" - }, - { - "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026", - "contenttype": "text/plain", - "created": 1445295716000, - "id": "656243916224638976", - "language": "en" - }, - { - "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 
8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026", - "contenttype": "text/plain", - "created": 1445295702000, - "id": "656243854610337793", - "language": "en" - }, - { - "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g", - "contenttype": "text/plain", - "created": 1445295668000, - "id": "656243714507931648", - "language": "en" - }, - { - "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026", - "contenttype": "text/plain", - "created": 1445295661000, - "id": "656243684720050176", - "language": "en" - }, - { - "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9", - "contenttype": "text/plain", - "created": 1445279425000, - "id": "656175584943341568", - "language": "en" - }, - { - "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.", - "contenttype": "text/plain", - "created": 1445275802000, - "id": "656160388526899200", - "language": "en" - }, - { - "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026", - "contenttype": "text/plain", - "created": 1445229489000, - "id": "655966138279432192", - "language": "en" - }, - { - "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah", - "contenttype": "text/plain", - "created": 1445227342000, - "id": "655957135688241152", - "language": "en" - }, - { - "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026", - "contenttype": "text/plain", - "created": 1445225935000, - "id": "655951232981295104", - "language": "en" - }, - { - "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026", - "contenttype": "text/plain", - "created": 1445225228000, - "id": "655948267868426240", - "language": "en" - }, - { - "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026", - "contenttype": "text/plain", - "created": 1445225008000, - "id": "655947345197076480", - "language": "en" - }, - { - "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026", - "contenttype": "text/plain", - "created": 1445224852000, - "id": "655946689249828864", - "language": "en" - }, - { - "content": "West Coast... Here we go. #Belief", - "contenttype": "text/plain", - "created": 1445224140000, - "id": "655943701840048128", - "language": "en" - }, - { - "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs", - "contenttype": "text/plain", - "created": 1445220694000, - "id": "655929249669378048", - "language": "en" - }, - { - "content": "I loved the Mendel story so much because it represents right of passasge. 
\" bye bye to childhood\".#Belief", - "contenttype": "text/plain", - "created": 1445215032000, - "id": "655905500056391682", - "language": "en" - }, - { - "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026", - "contenttype": "text/plain", - "created": 1445214538000, - "id": "655903432079904768", - "language": "en" - }, - { - "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV", - "contenttype": "text/plain", - "created": 1445214534000, - "id": "655903413385891840", - "language": "en" - }, - { - "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh", - "contenttype": "text/plain", - "created": 1445214502000, - "id": "655903277796732931", - "language": "en" - }, - { - "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief", - "contenttype": "text/plain", - "created": 1445214498000, - "id": "655903264374812672", - "language": "en" - }, - { - "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief", - "contenttype": "text/plain", - "created": 1445214339000, - "id": "655902594171203584", - "language": "en" - }, - { - "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY", - "contenttype": "text/plain", - "created": 1445214327000, - "id": "655902545903140864", - "language": "en" - }, - { - "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026", - "contenttype": "text/plain", - "created": 1445214128000, - "id": "655901708506103812", - "language": "en" - }, - { - "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026", - "contenttype": "text/plain", - "created": 1445213967000, - "id": "655901033952993280", - "language": "en" - }, - { - "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief", - "contenttype": "text/plain", - "created": 1445213904000, - "id": "655900772467474435", - "language": "en" - }, - { - "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it", - "contenttype": "text/plain", - "created": 1445213901000, - "id": "655900756604620800", - "language": "en" - }, - { - "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026", - "contenttype": "text/plain", - "created": 1445213721000, - "id": "655900002644987905", - "language": "en" - }, - { - "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV", - "contenttype": "text/plain", - "created": 1445213706000, - "id": "655899942242775040", - "language": "en" - }, - { - "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. 
\u2026", - "contenttype": "text/plain", - "created": 1445213699000, - "id": "655899910563217408", - "language": "en" - }, - { - "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC", - "contenttype": "text/plain", - "created": 1445212831000, - "id": "655896269542420480", - "language": "en" - }, - { - "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.", - "contenttype": "text/plain", - "created": 1445212747000, - "id": "655895916675600384", - "language": "en" - }, - { - "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF", - "contenttype": "text/plain", - "created": 1445212465000, - "id": "655894734968217600", - "language": "en" - }, - { - "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN", - "contenttype": "text/plain", - "created": 1445211845000, - "id": "655892134524878848", - "language": "en" - }, - { - "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026", - "contenttype": "text/plain", - "created": 1445211835000, - "id": "655892094905532416", - "language": "en" - }, - { - "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.", - "contenttype": "text/plain", - "created": 1445211833000, - "id": "655892084314890240", - "language": "en" - }, - { - "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026", - "contenttype": "text/plain", - "created": 1445209201000, - "id": "655881046102142978", - "language": "en" - }, - { - "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal", - "contenttype": "text/plain", - "created": 1445209181000, - "id": "655880959535939584", - "language": "en" - }, - { - "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026", - "contenttype": "text/plain", - "created": 1445208945000, - "id": "655879970732949504", - "language": "en" - }, - { - "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass", - "contenttype": "text/plain", - "created": 1447639154000, - "id": "666073008692314113", - "language": "en" - }, - { - "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass", - "contenttype": "text/plain", - "created": 1447638226000, - "id": "666069114889179136", - "language": "en" - }, - { - "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass", - "contenttype": "text/plain", - "created": 1447638067000, - "id": "666068446325665792", - "language": "en" - }, - { - "content": "Wow aren't you loving @TheRock and his candor? #Masterclass", - "contenttype": "text/plain", - "created": 1447637459000, - "id": "666065895932973057", - "language": "en" - }, - { - "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.", - "contenttype": "text/plain", - "created": 1447637030000, - "id": "666064097562247168", - "language": "en" - }, - { - "content": "\"Be You\".. 
That's the best advice ever @TheRock #MastersClass", - "contenttype": "text/plain", - "created": 1447636205000, - "id": "666060637181644800", - "language": "en" - }, - { - "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1447602477000, - "id": "665919171062927360", - "language": "en" - }, - { - "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts", - "contenttype": "text/plain", - "created": 1447098990000, - "id": "663807393063538688", - "language": "en" - }, - { - "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.", - "contenttype": "text/plain", - "created": 1446998643000, - "id": "663386507856736257", - "language": "en" - }, - { - "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K", - "contenttype": "text/plain", - "created": 1446915955000, - "id": "663039689360695296", - "language": "en" - }, - { - "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f", - "contenttype": "text/plain", - "created": 1446881193000, - "id": "662893888080879616", - "language": "en" - }, - { - "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U", - "contenttype": "text/plain", - "created": 1446744186000, - "id": "662319239844380672", - "language": "en" - }, - { - "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass", - "contenttype": "text/plain", - "created": 1446428929000, - "id": "660996956861280256", - "language": "en" - }, - { - "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass", - "contenttype": "text/plain", - "created": 1446426630000, - "id": "660987310889041920", - "language": "en" - }, - { - "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow", - "contenttype": "text/plain", - "created": 1446220097000, - "id": "660121050978611205", - "language": "en" - }, - { - "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno", - "contenttype": "text/plain", - "created": 1446074433000, - "id": "659510090748182528", - "language": "en" - }, - { - "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE", - "contenttype": "text/plain", - "created": 1445821114000, - "id": "658447593865945089", - "language": "en" - }, - { - "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass", - "contenttype": "text/plain", - "created": 1445821003000, - "id": "658447130026188800", - "language": "en" - }, - { - "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.", - "contenttype": "text/plain", - "created": 1445820161000, - "id": "658443598313181188", - "language": "en" - }, - { - "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. 
#SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J", - "contenttype": "text/plain", - "created": 1445811545000, - "id": "658407457438363648", - "language": "en" - }, - { - "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026", - "contenttype": "text/plain", - "created": 1445804181000, - "id": "658376572521459712", - "language": "en" - }, - { - "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw", - "contenttype": "text/plain", - "created": 1445804072000, - "id": "658376116575449088", - "language": "en" - }, - { - "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1445734755000, - "id": "658085377140363264", - "language": "en" - }, - { - "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna", - "contenttype": "text/plain", - "created": 1445734097000, - "id": "658082618819280896", - "language": "en" - }, - { - "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026", - "contenttype": "text/plain", - "created": 1445732769000, - "id": "658077046858383360", - "language": "en" - }, - { - "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief", - "contenttype": "text/plain", - "created": 1445732579000, - "id": "658076253618991104", - "language": "en" - }, - { - "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief", - "contenttype": "text/plain", - "created": 1445731782000, - "id": "658072908237934592", - "language": "en" - }, - { - "content": "I just love Alex and his daring #Belief to live fully the present Moment.", - "contenttype": "text/plain", - "created": 1445731561000, - "id": "658071980982206464", - "language": "en" - }, - { - "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f", - "contenttype": "text/plain", - "created": 1445731248000, - "id": "658070668785770496", - "language": "en" - }, - { - "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?", - "contenttype": "text/plain", - "created": 1445731081000, - "id": "658069968534171648", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!", - "contenttype": "text/plain", - "created": 1445648630000, - "id": "657724143115202560", - "language": "en" - }, - { - "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah", - "contenttype": "text/plain", - "created": 1445647285000, - "id": "657718501147197442", - "language": "en" - }, - { - "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief", - "contenttype": "text/plain", - "created": 1445646903000, - "id": "657716901951369218", - "language": "en" - }, - { - "content": "Hey Belief team mates can yo believe how quickly the week has passed? 
#Belief", - "contenttype": "text/plain", - "created": 1445645633000, - "id": "657711572492533760", - "language": "en" - }, - { - "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc", - "contenttype": "text/plain", - "created": 1445618531000, - "id": "657597898394173440", - "language": "en" - }, - { - "content": "Thanks All for another great night of #BELIEF", - "contenttype": "text/plain", - "created": 1445572548000, - "id": "657405031822430208", - "language": "en" - }, - { - "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026", - "contenttype": "text/plain", - "created": 1445571500000, - "id": "657400636745510912", - "language": "en" - }, - { - "content": "Ok west coast let's do it! #belief", - "contenttype": "text/plain", - "created": 1445569367000, - "id": "657391689439404033", - "language": "en" - }, - { - "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.", - "contenttype": "text/plain", - "created": 1445569296000, - "id": "657391393883619328", - "language": "en" - }, - { - "content": "Hello west coast twitterati.. See you at 8 for #Belief", - "contenttype": "text/plain", - "created": 1445566144000, - "id": "657378171872874496", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night.#Belief", - "contenttype": "text/plain", - "created": 1445475948000, - "id": "656999861254918145", - "language": "en" - }, - { - "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief", - "contenttype": "text/plain", - "created": 1445475602000, - "id": "656998409933451264", - "language": "en" - }, - { - "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief", - "contenttype": "text/plain", - "created": 1445475580000, - "id": "656998320133398528", - "language": "en" - }, - { - "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief", - "contenttype": "text/plain", - "created": 1445473768000, - "id": "656990717504393216", - "language": "en" - }, - { - "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief", - "contenttype": "text/plain", - "created": 1445473150000, - "id": "656988127433637888", - "language": "en" - }, - { - "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief", - "contenttype": "text/plain", - "created": 1445473063000, - "id": "656987763644891136", - "language": "en" - }, - { - "content": "\"What my faith gives me no one can match\"#Belief", - "contenttype": "text/plain", - "created": 1445472961000, - "id": "656987336266223616", - "language": "en" - }, - { - "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief", - "contenttype": "text/plain", - "created": 1445472531000, - "id": "656985529951522816", - "language": "en" - }, - { - "content": "I'd never heard of Jain monks and nuns before doing this series. 
#Belief", - "contenttype": "text/plain", - "created": 1445472393000, - "id": "656984953037586433", - "language": "en" - }, - { - "content": "Good evening Team #Belief the Tweet is on!", - "contenttype": "text/plain", - "created": 1445472098000, - "id": "656983714883239937", - "language": "en" - }, - { - "content": "Thanks everyone for another Epic #Belief night!", - "contenttype": "text/plain", - "created": 1445302792000, - "id": "656273592485810176", - "language": "en" - }, - { - "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief", - "contenttype": "text/plain", - "created": 1445302749000, - "id": "656273415280705536", - "language": "en" - }, - { - "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief", - "contenttype": "text/plain", - "created": 1445301110000, - "id": "656266540967424000", - "language": "en" - }, - { - "content": "Do you all believe in \"soul mates\"?\n#Belief", - "contenttype": "text/plain", - "created": 1445300138000, - "id": "656262462426238976", - "language": "en" - }, - { - "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.", - "contenttype": "text/plain", - "created": 1445299749000, - "id": "656260832628756480", - "language": "en" - }, - { - "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief", - "contenttype": "text/plain", - "created": 1445299614000, - "id": "656260263604310016", - "language": "en" - }, - { - "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief", - "contenttype": "text/plain", - "created": 1445299326000, - "id": "656259057758654464", - "language": "en" - }, - { - "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026", - "contenttype": "text/plain", - "created": 1445295716000, - "id": "656243916224638976", - "language": "en" - }, - { - "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026", - "contenttype": "text/plain", - "created": 1445295702000, - "id": "656243854610337793", - "language": "en" - }, - { - "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g", - "contenttype": "text/plain", - "created": 1445295668000, - "id": "656243714507931648", - "language": "en" - }, - { - "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026", - "contenttype": "text/plain", - "created": 1445295661000, - "id": "656243684720050176", - "language": "en" - }, - { - "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. 
See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9", - "contenttype": "text/plain", - "created": 1445279425000, - "id": "656175584943341568", - "language": "en" - }, - { - "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.", - "contenttype": "text/plain", - "created": 1445275802000, - "id": "656160388526899200", - "language": "en" - }, - { - "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026", - "contenttype": "text/plain", - "created": 1445229489000, - "id": "655966138279432192", - "language": "en" - }, - { - "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah", - "contenttype": "text/plain", - "created": 1445227342000, - "id": "655957135688241152", - "language": "en" - }, - { - "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026", - "contenttype": "text/plain", - "created": 1445225935000, - "id": "655951232981295104", - "language": "en" - }, - { - "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026", - "contenttype": "text/plain", - "created": 1445225228000, - "id": "655948267868426240", - "language": "en" - }, - { - "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026", - "contenttype": "text/plain", - "created": 1445225008000, - "id": "655947345197076480", - "language": "en" - }, - { - "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026", - "contenttype": "text/plain", - "created": 1445224852000, - "id": "655946689249828864", - "language": "en" - }, - { - "content": "West Coast... Here we go. #Belief", - "contenttype": "text/plain", - "created": 1445224140000, - "id": "655943701840048128", - "language": "en" - }, - { - "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs", - "contenttype": "text/plain", - "created": 1445220694000, - "id": "655929249669378048", - "language": "en" - }, - { - "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief", - "contenttype": "text/plain", - "created": 1445215032000, - "id": "655905500056391682", - "language": "en" - }, - { - "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026", - "contenttype": "text/plain", - "created": 1445214538000, - "id": "655903432079904768", - "language": "en" - }, - { - "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV", - "contenttype": "text/plain", - "created": 1445214534000, - "id": "655903413385891840", - "language": "en" - }, - { - "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh", - "contenttype": "text/plain", - "created": 1445214502000, - "id": "655903277796732931", - "language": "en" - }, - { - "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. 
I can feel this moment was special for @ReshThakkar #Belief", - "contenttype": "text/plain", - "created": 1445214498000, - "id": "655903264374812672", - "language": "en" - }, - { - "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief", - "contenttype": "text/plain", - "created": 1445214339000, - "id": "655902594171203584", - "language": "en" - }, - { - "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY", - "contenttype": "text/plain", - "created": 1445214327000, - "id": "655902545903140864", - "language": "en" - }, - { - "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026", - "contenttype": "text/plain", - "created": 1445214128000, - "id": "655901708506103812", - "language": "en" - }, - { - "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026", - "contenttype": "text/plain", - "created": 1445213967000, - "id": "655901033952993280", - "language": "en" - }, - { - "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief", - "contenttype": "text/plain", - "created": 1445213904000, - "id": "655900772467474435", - "language": "en" - }, - { - "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it", - "contenttype": "text/plain", - "created": 1445213901000, - "id": "655900756604620800", - "language": "en" - }, - { - "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026", - "contenttype": "text/plain", - "created": 1445213721000, - "id": "655900002644987905", - "language": "en" - }, - { - "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV", - "contenttype": "text/plain", - "created": 1445213706000, - "id": "655899942242775040", - "language": "en" - }, - { - "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026", - "contenttype": "text/plain", - "created": 1445213699000, - "id": "655899910563217408", - "language": "en" - }, - { - "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC", - "contenttype": "text/plain", - "created": 1445212831000, - "id": "655896269542420480", - "language": "en" - }, - { - "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.", - "contenttype": "text/plain", - "created": 1445212747000, - "id": "655895916675600384", - "language": "en" - }, - { - "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF", - "contenttype": "text/plain", - "created": 1445212465000, - "id": "655894734968217600", - "language": "en" - }, - { - "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN", - "contenttype": "text/plain", - "created": 1445211845000, - "id": "655892134524878848", - "language": "en" - }, - { - "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026", - "contenttype": "text/plain", - "created": 1445211835000, - "id": "655892094905532416", - "language": "en" - }, - { - "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. 
Pretty sure it's about to be our favorite thing.", - "contenttype": "text/plain", - "created": 1445211833000, - "id": "655892084314890240", - "language": "en" - }, - { - "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026", - "contenttype": "text/plain", - "created": 1445209201000, - "id": "655881046102142978", - "language": "en" - }, - { - "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal", - "contenttype": "text/plain", - "created": 1445209181000, - "id": "655880959535939584", - "language": "en" - }, - { - "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026", - "contenttype": "text/plain", - "created": 1445208945000, - "id": "655879970732949504", - "language": "en" - }, - { - "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass", - "contenttype": "text/plain", - "created": 1447639154000, - "id": "666073008692314113", - "language": "en" - }, - { - "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass", - "contenttype": "text/plain", - "created": 1447638226000, - "id": "666069114889179136", - "language": "en" - }, - { - "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass", - "contenttype": "text/plain", - "created": 1447638067000, - "id": "666068446325665792", - "language": "en" - }, - { - "content": "Wow aren't you loving @TheRock and his candor? #Masterclass", - "contenttype": "text/plain", - "created": 1447637459000, - "id": "666065895932973057", - "language": "en" - }, - { - "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.", - "contenttype": "text/plain", - "created": 1447637030000, - "id": "666064097562247168", - "language": "en" - }, - { - "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass", - "contenttype": "text/plain", - "created": 1447636205000, - "id": "666060637181644800", - "language": "en" - }, - { - "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1447602477000, - "id": "665919171062927360", - "language": "en" - }, - { - "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts", - "contenttype": "text/plain", - "created": 1447098990000, - "id": "663807393063538688", - "language": "en" - }, - { - "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.", - "contenttype": "text/plain", - "created": 1446998643000, - "id": "663386507856736257", - "language": "en" - }, - { - "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K", - "contenttype": "text/plain", - "created": 1446915955000, - "id": "663039689360695296", - "language": "en" - }, - { - "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. 
A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f", - "contenttype": "text/plain", - "created": 1446881193000, - "id": "662893888080879616", - "language": "en" - }, - { - "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U", - "contenttype": "text/plain", - "created": 1446744186000, - "id": "662319239844380672", - "language": "en" - }, - { - "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass", - "contenttype": "text/plain", - "created": 1446428929000, - "id": "660996956861280256", - "language": "en" - }, - { - "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass", - "contenttype": "text/plain", - "created": 1446426630000, - "id": "660987310889041920", - "language": "en" - }, - { - "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow", - "contenttype": "text/plain", - "created": 1446220097000, - "id": "660121050978611205", - "language": "en" - }, - { - "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno", - "contenttype": "text/plain", - "created": 1446074433000, - "id": "659510090748182528", - "language": "en" - }, - { - "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE", - "contenttype": "text/plain", - "created": 1445821114000, - "id": "658447593865945089", - "language": "en" - }, - { - "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass", - "contenttype": "text/plain", - "created": 1445821003000, - "id": "658447130026188800", - "language": "en" - }, - { - "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.", - "contenttype": "text/plain", - "created": 1445820161000, - "id": "658443598313181188", - "language": "en" - }, - { - "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J", - "contenttype": "text/plain", - "created": 1445811545000, - "id": "658407457438363648", - "language": "en" - }, - { - "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026", - "contenttype": "text/plain", - "created": 1445804181000, - "id": "658376572521459712", - "language": "en" - }, - { - "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw", - "contenttype": "text/plain", - "created": 1445804072000, - "id": "658376116575449088", - "language": "en" - }, - { - "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1445734755000, - "id": "658085377140363264", - "language": "en" - }, - { - "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna", - "contenttype": "text/plain", - "created": 1445734097000, - "id": "658082618819280896", - "language": "en" - }, - { - "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... 
we all have a belie\u2026", - "contenttype": "text/plain", - "created": 1445732769000, - "id": "658077046858383360", - "language": "en" - }, - { - "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief", - "contenttype": "text/plain", - "created": 1445732579000, - "id": "658076253618991104", - "language": "en" - }, - { - "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief", - "contenttype": "text/plain", - "created": 1445731782000, - "id": "658072908237934592", - "language": "en" - }, - { - "content": "I just love Alex and his daring #Belief to live fully the present Moment.", - "contenttype": "text/plain", - "created": 1445731561000, - "id": "658071980982206464", - "language": "en" - }, - { - "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f", - "contenttype": "text/plain", - "created": 1445731248000, - "id": "658070668785770496", - "language": "en" - }, - { - "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?", - "contenttype": "text/plain", - "created": 1445731081000, - "id": "658069968534171648", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!", - "contenttype": "text/plain", - "created": 1445648630000, - "id": "657724143115202560", - "language": "en" - }, - { - "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah", - "contenttype": "text/plain", - "created": 1445647285000, - "id": "657718501147197442", - "language": "en" - }, - { - "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief", - "contenttype": "text/plain", - "created": 1445646903000, - "id": "657716901951369218", - "language": "en" - }, - { - "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief", - "contenttype": "text/plain", - "created": 1445645633000, - "id": "657711572492533760", - "language": "en" - }, - { - "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc", - "contenttype": "text/plain", - "created": 1445618531000, - "id": "657597898394173440", - "language": "en" - }, - { - "content": "Thanks All for another great night of #BELIEF", - "contenttype": "text/plain", - "created": 1445572548000, - "id": "657405031822430208", - "language": "en" - }, - { - "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026", - "contenttype": "text/plain", - "created": 1445571500000, - "id": "657400636745510912", - "language": "en" - }, - { - "content": "Ok west coast let's do it! #belief", - "contenttype": "text/plain", - "created": 1445569367000, - "id": "657391689439404033", - "language": "en" - }, - { - "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.", - "contenttype": "text/plain", - "created": 1445569296000, - "id": "657391393883619328", - "language": "en" - }, - { - "content": "Hello west coast twitterati.. 
See you at 8 for #Belief", - "contenttype": "text/plain", - "created": 1445566144000, - "id": "657378171872874496", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night.#Belief", - "contenttype": "text/plain", - "created": 1445475948000, - "id": "656999861254918145", - "language": "en" - }, - { - "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief", - "contenttype": "text/plain", - "created": 1445475602000, - "id": "656998409933451264", - "language": "en" - }, - { - "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief", - "contenttype": "text/plain", - "created": 1445475580000, - "id": "656998320133398528", - "language": "en" - }, - { - "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief", - "contenttype": "text/plain", - "created": 1445473768000, - "id": "656990717504393216", - "language": "en" - }, - { - "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief", - "contenttype": "text/plain", - "created": 1445473150000, - "id": "656988127433637888", - "language": "en" - }, - { - "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief", - "contenttype": "text/plain", - "created": 1445473063000, - "id": "656987763644891136", - "language": "en" - }, - { - "content": "\"What my faith gives me no one can match\"#Belief", - "contenttype": "text/plain", - "created": 1445472961000, - "id": "656987336266223616", - "language": "en" - }, - { - "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief", - "contenttype": "text/plain", - "created": 1445472531000, - "id": "656985529951522816", - "language": "en" - }, - { - "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief", - "contenttype": "text/plain", - "created": 1445472393000, - "id": "656984953037586433", - "language": "en" - }, - { - "content": "Good evening Team #Belief the Tweet is on!", - "contenttype": "text/plain", - "created": 1445472098000, - "id": "656983714883239937", - "language": "en" - }, - { - "content": "Thanks everyone for another Epic #Belief night!", - "contenttype": "text/plain", - "created": 1445302792000, - "id": "656273592485810176", - "language": "en" - }, - { - "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief", - "contenttype": "text/plain", - "created": 1445302749000, - "id": "656273415280705536", - "language": "en" - }, - { - "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief", - "contenttype": "text/plain", - "created": 1445301110000, - "id": "656266540967424000", - "language": "en" - }, - { - "content": "Do you all believe in \"soul mates\"?\n#Belief", - "contenttype": "text/plain", - "created": 1445300138000, - "id": "656262462426238976", - "language": "en" - }, - { - "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.", - "contenttype": "text/plain", - "created": 1445299749000, - "id": "656260832628756480", - "language": "en" - }, - { - "content": "This is one of the best love stories I've ever seen. 
#belief Ian and Larissa showing us the depths of love.#Belief", - "contenttype": "text/plain", - "created": 1445299614000, - "id": "656260263604310016", - "language": "en" - }, - { - "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief", - "contenttype": "text/plain", - "created": 1445299326000, - "id": "656259057758654464", - "language": "en" - }, - { - "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026", - "contenttype": "text/plain", - "created": 1445295716000, - "id": "656243916224638976", - "language": "en" - }, - { - "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026", - "contenttype": "text/plain", - "created": 1445295702000, - "id": "656243854610337793", - "language": "en" - }, - { - "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g", - "contenttype": "text/plain", - "created": 1445295668000, - "id": "656243714507931648", - "language": "en" - }, - { - "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026", - "contenttype": "text/plain", - "created": 1445295661000, - "id": "656243684720050176", - "language": "en" - }, - { - "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9", - "contenttype": "text/plain", - "created": 1445279425000, - "id": "656175584943341568", - "language": "en" - }, - { - "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.", - "contenttype": "text/plain", - "created": 1445275802000, - "id": "656160388526899200", - "language": "en" - }, - { - "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026", - "contenttype": "text/plain", - "created": 1445229489000, - "id": "655966138279432192", - "language": "en" - }, - { - "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah", - "contenttype": "text/plain", - "created": 1445227342000, - "id": "655957135688241152", - "language": "en" - }, - { - "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026", - "contenttype": "text/plain", - "created": 1445225935000, - "id": "655951232981295104", - "language": "en" - }, - { - "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026", - "contenttype": "text/plain", - "created": 1445225228000, - "id": "655948267868426240", - "language": "en" - }, - { - "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026", - "contenttype": "text/plain", - "created": 1445225008000, - "id": "655947345197076480", - "language": "en" - }, - { - "content": "RT @iamtikasumpter: #Belief had me in awe. 
Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026", - "contenttype": "text/plain", - "created": 1445224852000, - "id": "655946689249828864", - "language": "en" - }, - { - "content": "West Coast... Here we go. #Belief", - "contenttype": "text/plain", - "created": 1445224140000, - "id": "655943701840048128", - "language": "en" - }, - { - "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs", - "contenttype": "text/plain", - "created": 1445220694000, - "id": "655929249669378048", - "language": "en" - }, - { - "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief", - "contenttype": "text/plain", - "created": 1445215032000, - "id": "655905500056391682", - "language": "en" - }, - { - "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026", - "contenttype": "text/plain", - "created": 1445214538000, - "id": "655903432079904768", - "language": "en" - }, - { - "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV", - "contenttype": "text/plain", - "created": 1445214534000, - "id": "655903413385891840", - "language": "en" - }, - { - "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh", - "contenttype": "text/plain", - "created": 1445214502000, - "id": "655903277796732931", - "language": "en" - }, - { - "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief", - "contenttype": "text/plain", - "created": 1445214498000, - "id": "655903264374812672", - "language": "en" - }, - { - "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief", - "contenttype": "text/plain", - "created": 1445214339000, - "id": "655902594171203584", - "language": "en" - }, - { - "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY", - "contenttype": "text/plain", - "created": 1445214327000, - "id": "655902545903140864", - "language": "en" - }, - { - "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026", - "contenttype": "text/plain", - "created": 1445214128000, - "id": "655901708506103812", - "language": "en" - }, - { - "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026", - "contenttype": "text/plain", - "created": 1445213967000, - "id": "655901033952993280", - "language": "en" - }, - { - "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief", - "contenttype": "text/plain", - "created": 1445213904000, - "id": "655900772467474435", - "language": "en" - }, - { - "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it", - "contenttype": "text/plain", - "created": 1445213901000, - "id": "655900756604620800", - "language": "en" - }, - { - "content": "RT @ckerfin: On another river on the other side of the world... 
transition between stories, drawing connections to different beliefs #Belie\u2026", - "contenttype": "text/plain", - "created": 1445213721000, - "id": "655900002644987905", - "language": "en" - }, - { - "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV", - "contenttype": "text/plain", - "created": 1445213706000, - "id": "655899942242775040", - "language": "en" - }, - { - "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026", - "contenttype": "text/plain", - "created": 1445213699000, - "id": "655899910563217408", - "language": "en" - }, - { - "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC", - "contenttype": "text/plain", - "created": 1445212831000, - "id": "655896269542420480", - "language": "en" - }, - { - "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.", - "contenttype": "text/plain", - "created": 1445212747000, - "id": "655895916675600384", - "language": "en" - }, - { - "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF", - "contenttype": "text/plain", - "created": 1445212465000, - "id": "655894734968217600", - "language": "en" - }, - { - "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN", - "contenttype": "text/plain", - "created": 1445211845000, - "id": "655892134524878848", - "language": "en" - }, - { - "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026", - "contenttype": "text/plain", - "created": 1445211835000, - "id": "655892094905532416", - "language": "en" - }, - { - "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.", - "contenttype": "text/plain", - "created": 1445211833000, - "id": "655892084314890240", - "language": "en" - }, - { - "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026", - "contenttype": "text/plain", - "created": 1445209201000, - "id": "655881046102142978", - "language": "en" - }, - { - "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal", - "contenttype": "text/plain", - "created": 1445209181000, - "id": "655880959535939584", - "language": "en" - }, - { - "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026", - "contenttype": "text/plain", - "created": 1445208945000, - "id": "655879970732949504", - "language": "en" - }, - { - "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass", - "contenttype": "text/plain", - "created": 1447639154000, - "id": "666073008692314113", - "language": "en" - }, - { - "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass", - "contenttype": "text/plain", - "created": 1447638226000, - "id": "666069114889179136", - "language": "en" - }, - { - "content": ".@TheRock moving back in with your parents so humbling. 
\" on the other side of your pain is something good if you can hold on\" #masterclass", - "contenttype": "text/plain", - "created": 1447638067000, - "id": "666068446325665792", - "language": "en" - }, - { - "content": "Wow aren't you loving @TheRock and his candor? #Masterclass", - "contenttype": "text/plain", - "created": 1447637459000, - "id": "666065895932973057", - "language": "en" - }, - { - "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.", - "contenttype": "text/plain", - "created": 1447637030000, - "id": "666064097562247168", - "language": "en" - }, - { - "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass", - "contenttype": "text/plain", - "created": 1447636205000, - "id": "666060637181644800", - "language": "en" - }, - { - "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1447602477000, - "id": "665919171062927360", - "language": "en" - }, - { - "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts", - "contenttype": "text/plain", - "created": 1447098990000, - "id": "663807393063538688", - "language": "en" - }, - { - "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.", - "contenttype": "text/plain", - "created": 1446998643000, - "id": "663386507856736257", - "language": "en" - }, - { - "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K", - "contenttype": "text/plain", - "created": 1446915955000, - "id": "663039689360695296", - "language": "en" - }, - { - "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f", - "contenttype": "text/plain", - "created": 1446881193000, - "id": "662893888080879616", - "language": "en" - }, - { - "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U", - "contenttype": "text/plain", - "created": 1446744186000, - "id": "662319239844380672", - "language": "en" - }, - { - "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass", - "contenttype": "text/plain", - "created": 1446428929000, - "id": "660996956861280256", - "language": "en" - }, - { - "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass", - "contenttype": "text/plain", - "created": 1446426630000, - "id": "660987310889041920", - "language": "en" - }, - { - "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow", - "contenttype": "text/plain", - "created": 1446220097000, - "id": "660121050978611205", - "language": "en" - }, - { - "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! 
https:\/\/t.co\/iFMnpRAsno", - "contenttype": "text/plain", - "created": 1446074433000, - "id": "659510090748182528", - "language": "en" - }, - { - "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE", - "contenttype": "text/plain", - "created": 1445821114000, - "id": "658447593865945089", - "language": "en" - }, - { - "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass", - "contenttype": "text/plain", - "created": 1445821003000, - "id": "658447130026188800", - "language": "en" - }, - { - "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.", - "contenttype": "text/plain", - "created": 1445820161000, - "id": "658443598313181188", - "language": "en" - }, - { - "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J", - "contenttype": "text/plain", - "created": 1445811545000, - "id": "658407457438363648", - "language": "en" - }, - { - "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026", - "contenttype": "text/plain", - "created": 1445804181000, - "id": "658376572521459712", - "language": "en" - }, - { - "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw", - "contenttype": "text/plain", - "created": 1445804072000, - "id": "658376116575449088", - "language": "en" - }, - { - "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1445734755000, - "id": "658085377140363264", - "language": "en" - }, - { - "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna", - "contenttype": "text/plain", - "created": 1445734097000, - "id": "658082618819280896", - "language": "en" - }, - { - "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026", - "contenttype": "text/plain", - "created": 1445732769000, - "id": "658077046858383360", - "language": "en" - }, - { - "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief", - "contenttype": "text/plain", - "created": 1445732579000, - "id": "658076253618991104", - "language": "en" - }, - { - "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief", - "contenttype": "text/plain", - "created": 1445731782000, - "id": "658072908237934592", - "language": "en" - }, - { - "content": "I just love Alex and his daring #Belief to live fully the present Moment.", - "contenttype": "text/plain", - "created": 1445731561000, - "id": "658071980982206464", - "language": "en" - }, - { - "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f", - "contenttype": "text/plain", - "created": 1445731248000, - "id": "658070668785770496", - "language": "en" - }, - { - "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? 
What do you Believe?", - "contenttype": "text/plain", - "created": 1445731081000, - "id": "658069968534171648", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!", - "contenttype": "text/plain", - "created": 1445648630000, - "id": "657724143115202560", - "language": "en" - }, - { - "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah", - "contenttype": "text/plain", - "created": 1445647285000, - "id": "657718501147197442", - "language": "en" - }, - { - "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief", - "contenttype": "text/plain", - "created": 1445646903000, - "id": "657716901951369218", - "language": "en" - }, - { - "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief", - "contenttype": "text/plain", - "created": 1445645633000, - "id": "657711572492533760", - "language": "en" - }, - { - "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc", - "contenttype": "text/plain", - "created": 1445618531000, - "id": "657597898394173440", - "language": "en" - }, - { - "content": "Thanks All for another great night of #BELIEF", - "contenttype": "text/plain", - "created": 1445572548000, - "id": "657405031822430208", - "language": "en" - }, - { - "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026", - "contenttype": "text/plain", - "created": 1445571500000, - "id": "657400636745510912", - "language": "en" - }, - { - "content": "Ok west coast let's do it! #belief", - "contenttype": "text/plain", - "created": 1445569367000, - "id": "657391689439404033", - "language": "en" - }, - { - "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.", - "contenttype": "text/plain", - "created": 1445569296000, - "id": "657391393883619328", - "language": "en" - }, - { - "content": "Hello west coast twitterati.. See you at 8 for #Belief", - "contenttype": "text/plain", - "created": 1445566144000, - "id": "657378171872874496", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night.#Belief", - "contenttype": "text/plain", - "created": 1445475948000, - "id": "656999861254918145", - "language": "en" - }, - { - "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief", - "contenttype": "text/plain", - "created": 1445475602000, - "id": "656998409933451264", - "language": "en" - }, - { - "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief", - "contenttype": "text/plain", - "created": 1445475580000, - "id": "656998320133398528", - "language": "en" - }, - { - "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief", - "contenttype": "text/plain", - "created": 1445473768000, - "id": "656990717504393216", - "language": "en" - }, - { - "content": "Notice her mother crying. 
She knows its last tine she will ever see her daughter.#Belief", - "contenttype": "text/plain", - "created": 1445473150000, - "id": "656988127433637888", - "language": "en" - }, - { - "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief", - "contenttype": "text/plain", - "created": 1445473063000, - "id": "656987763644891136", - "language": "en" - }, - { - "content": "\"What my faith gives me no one can match\"#Belief", - "contenttype": "text/plain", - "created": 1445472961000, - "id": "656987336266223616", - "language": "en" - }, - { - "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief", - "contenttype": "text/plain", - "created": 1445472531000, - "id": "656985529951522816", - "language": "en" - }, - { - "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief", - "contenttype": "text/plain", - "created": 1445472393000, - "id": "656984953037586433", - "language": "en" - }, - { - "content": "Good evening Team #Belief the Tweet is on!", - "contenttype": "text/plain", - "created": 1445472098000, - "id": "656983714883239937", - "language": "en" - }, - { - "content": "Thanks everyone for another Epic #Belief night!", - "contenttype": "text/plain", - "created": 1445302792000, - "id": "656273592485810176", - "language": "en" - }, - { - "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief", - "contenttype": "text/plain", - "created": 1445302749000, - "id": "656273415280705536", - "language": "en" - }, - { - "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief", - "contenttype": "text/plain", - "created": 1445301110000, - "id": "656266540967424000", - "language": "en" - }, - { - "content": "Do you all believe in \"soul mates\"?\n#Belief", - "contenttype": "text/plain", - "created": 1445300138000, - "id": "656262462426238976", - "language": "en" - }, - { - "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.", - "contenttype": "text/plain", - "created": 1445299749000, - "id": "656260832628756480", - "language": "en" - }, - { - "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief", - "contenttype": "text/plain", - "created": 1445299614000, - "id": "656260263604310016", - "language": "en" - }, - { - "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief", - "contenttype": "text/plain", - "created": 1445299326000, - "id": "656259057758654464", - "language": "en" - }, - { - "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026", - "contenttype": "text/plain", - "created": 1445295716000, - "id": "656243916224638976", - "language": "en" - }, - { - "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026", - "contenttype": "text/plain", - "created": 1445295702000, - "id": "656243854610337793", - "language": "en" - }, - { - "content": "RT @OWNTV: See the world through someone else\u2019s soul. 
The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g", - "contenttype": "text/plain", - "created": 1445295668000, - "id": "656243714507931648", - "language": "en" - }, - { - "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026", - "contenttype": "text/plain", - "created": 1445295661000, - "id": "656243684720050176", - "language": "en" - }, - { - "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9", - "contenttype": "text/plain", - "created": 1445279425000, - "id": "656175584943341568", - "language": "en" - }, - { - "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.", - "contenttype": "text/plain", - "created": 1445275802000, - "id": "656160388526899200", - "language": "en" - }, - { - "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026", - "contenttype": "text/plain", - "created": 1445229489000, - "id": "655966138279432192", - "language": "en" - }, - { - "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah", - "contenttype": "text/plain", - "created": 1445227342000, - "id": "655957135688241152", - "language": "en" - }, - { - "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026", - "contenttype": "text/plain", - "created": 1445225935000, - "id": "655951232981295104", - "language": "en" - }, - { - "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026", - "contenttype": "text/plain", - "created": 1445225228000, - "id": "655948267868426240", - "language": "en" - }, - { - "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026", - "contenttype": "text/plain", - "created": 1445225008000, - "id": "655947345197076480", - "language": "en" - }, - { - "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026", - "contenttype": "text/plain", - "created": 1445224852000, - "id": "655946689249828864", - "language": "en" - }, - { - "content": "West Coast... Here we go. #Belief", - "contenttype": "text/plain", - "created": 1445224140000, - "id": "655943701840048128", - "language": "en" - }, - { - "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs", - "contenttype": "text/plain", - "created": 1445220694000, - "id": "655929249669378048", - "language": "en" - }, - { - "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief", - "contenttype": "text/plain", - "created": 1445215032000, - "id": "655905500056391682", - "language": "en" - }, - { - "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. 
#Bel\u2026", - "contenttype": "text/plain", - "created": 1445214538000, - "id": "655903432079904768", - "language": "en" - }, - { - "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV", - "contenttype": "text/plain", - "created": 1445214534000, - "id": "655903413385891840", - "language": "en" - }, - { - "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh", - "contenttype": "text/plain", - "created": 1445214502000, - "id": "655903277796732931", - "language": "en" - }, - { - "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief", - "contenttype": "text/plain", - "created": 1445214498000, - "id": "655903264374812672", - "language": "en" - }, - { - "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief", - "contenttype": "text/plain", - "created": 1445214339000, - "id": "655902594171203584", - "language": "en" - }, - { - "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY", - "contenttype": "text/plain", - "created": 1445214327000, - "id": "655902545903140864", - "language": "en" - }, - { - "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026", - "contenttype": "text/plain", - "created": 1445214128000, - "id": "655901708506103812", - "language": "en" - }, - { - "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026", - "contenttype": "text/plain", - "created": 1445213967000, - "id": "655901033952993280", - "language": "en" - }, - { - "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief", - "contenttype": "text/plain", - "created": 1445213904000, - "id": "655900772467474435", - "language": "en" - }, - { - "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it", - "contenttype": "text/plain", - "created": 1445213901000, - "id": "655900756604620800", - "language": "en" - }, - { - "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026", - "contenttype": "text/plain", - "created": 1445213721000, - "id": "655900002644987905", - "language": "en" - }, - { - "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV", - "contenttype": "text/plain", - "created": 1445213706000, - "id": "655899942242775040", - "language": "en" - }, - { - "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026", - "contenttype": "text/plain", - "created": 1445213699000, - "id": "655899910563217408", - "language": "en" - }, - { - "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC", - "contenttype": "text/plain", - "created": 1445212831000, - "id": "655896269542420480", - "language": "en" - }, - { - "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.", - "contenttype": "text/plain", - "created": 1445212747000, - "id": "655895916675600384", - "language": "en" - }, - { - "content": "RT @GrowingOWN: 7 minutes, y'all! 
#BELIEF", - "contenttype": "text/plain", - "created": 1445212465000, - "id": "655894734968217600", - "language": "en" - }, - { - "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN", - "contenttype": "text/plain", - "created": 1445211845000, - "id": "655892134524878848", - "language": "en" - }, - { - "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026", - "contenttype": "text/plain", - "created": 1445211835000, - "id": "655892094905532416", - "language": "en" - }, - { - "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.", - "contenttype": "text/plain", - "created": 1445211833000, - "id": "655892084314890240", - "language": "en" - }, - { - "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026", - "contenttype": "text/plain", - "created": 1445209201000, - "id": "655881046102142978", - "language": "en" - }, - { - "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal", - "contenttype": "text/plain", - "created": 1445209181000, - "id": "655880959535939584", - "language": "en" - }, - { - "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026", - "contenttype": "text/plain", - "created": 1445208945000, - "id": "655879970732949504", - "language": "en" - }, - { - "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass", - "contenttype": "text/plain", - "created": 1447639154000, - "id": "666073008692314113", - "language": "en" - }, - { - "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass", - "contenttype": "text/plain", - "created": 1447638226000, - "id": "666069114889179136", - "language": "en" - }, - { - "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass", - "contenttype": "text/plain", - "created": 1447638067000, - "id": "666068446325665792", - "language": "en" - }, - { - "content": "Wow aren't you loving @TheRock and his candor? #Masterclass", - "contenttype": "text/plain", - "created": 1447637459000, - "id": "666065895932973057", - "language": "en" - }, - { - "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.", - "contenttype": "text/plain", - "created": 1447637030000, - "id": "666064097562247168", - "language": "en" - }, - { - "content": "\"Be You\".. 
That's the best advice ever @TheRock #MastersClass", - "contenttype": "text/plain", - "created": 1447636205000, - "id": "666060637181644800", - "language": "en" - }, - { - "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1447602477000, - "id": "665919171062927360", - "language": "en" - }, - { - "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts", - "contenttype": "text/plain", - "created": 1447098990000, - "id": "663807393063538688", - "language": "en" - }, - { - "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.", - "contenttype": "text/plain", - "created": 1446998643000, - "id": "663386507856736257", - "language": "en" - }, - { - "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K", - "contenttype": "text/plain", - "created": 1446915955000, - "id": "663039689360695296", - "language": "en" - }, - { - "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f", - "contenttype": "text/plain", - "created": 1446881193000, - "id": "662893888080879616", - "language": "en" - }, - { - "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U", - "contenttype": "text/plain", - "created": 1446744186000, - "id": "662319239844380672", - "language": "en" - }, - { - "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass", - "contenttype": "text/plain", - "created": 1446428929000, - "id": "660996956861280256", - "language": "en" - }, - { - "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass", - "contenttype": "text/plain", - "created": 1446426630000, - "id": "660987310889041920", - "language": "en" - }, - { - "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow", - "contenttype": "text/plain", - "created": 1446220097000, - "id": "660121050978611205", - "language": "en" - }, - { - "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno", - "contenttype": "text/plain", - "created": 1446074433000, - "id": "659510090748182528", - "language": "en" - }, - { - "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE", - "contenttype": "text/plain", - "created": 1445821114000, - "id": "658447593865945089", - "language": "en" - }, - { - "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass", - "contenttype": "text/plain", - "created": 1445821003000, - "id": "658447130026188800", - "language": "en" - }, - { - "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.", - "contenttype": "text/plain", - "created": 1445820161000, - "id": "658443598313181188", - "language": "en" - }, - { - "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. 
#SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J", - "contenttype": "text/plain", - "created": 1445811545000, - "id": "658407457438363648", - "language": "en" - }, - { - "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026", - "contenttype": "text/plain", - "created": 1445804181000, - "id": "658376572521459712", - "language": "en" - }, - { - "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw", - "contenttype": "text/plain", - "created": 1445804072000, - "id": "658376116575449088", - "language": "en" - }, - { - "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1445734755000, - "id": "658085377140363264", - "language": "en" - }, - { - "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna", - "contenttype": "text/plain", - "created": 1445734097000, - "id": "658082618819280896", - "language": "en" - }, - { - "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026", - "contenttype": "text/plain", - "created": 1445732769000, - "id": "658077046858383360", - "language": "en" - }, - { - "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief", - "contenttype": "text/plain", - "created": 1445732579000, - "id": "658076253618991104", - "language": "en" - }, - { - "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief", - "contenttype": "text/plain", - "created": 1445731782000, - "id": "658072908237934592", - "language": "en" - }, - { - "content": "I just love Alex and his daring #Belief to live fully the present Moment.", - "contenttype": "text/plain", - "created": 1445731561000, - "id": "658071980982206464", - "language": "en" - }, - { - "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f", - "contenttype": "text/plain", - "created": 1445731248000, - "id": "658070668785770496", - "language": "en" - }, - { - "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?", - "contenttype": "text/plain", - "created": 1445731081000, - "id": "658069968534171648", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!", - "contenttype": "text/plain", - "created": 1445648630000, - "id": "657724143115202560", - "language": "en" - }, - { - "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah", - "contenttype": "text/plain", - "created": 1445647285000, - "id": "657718501147197442", - "language": "en" - }, - { - "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief", - "contenttype": "text/plain", - "created": 1445646903000, - "id": "657716901951369218", - "language": "en" - }, - { - "content": "Hey Belief team mates can yo believe how quickly the week has passed? 
#Belief", - "contenttype": "text/plain", - "created": 1445645633000, - "id": "657711572492533760", - "language": "en" - }, - { - "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc", - "contenttype": "text/plain", - "created": 1445618531000, - "id": "657597898394173440", - "language": "en" - }, - { - "content": "Thanks All for another great night of #BELIEF", - "contenttype": "text/plain", - "created": 1445572548000, - "id": "657405031822430208", - "language": "en" - }, - { - "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026", - "contenttype": "text/plain", - "created": 1445571500000, - "id": "657400636745510912", - "language": "en" - }, - { - "content": "Ok west coast let's do it! #belief", - "contenttype": "text/plain", - "created": 1445569367000, - "id": "657391689439404033", - "language": "en" - }, - { - "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.", - "contenttype": "text/plain", - "created": 1445569296000, - "id": "657391393883619328", - "language": "en" - }, - { - "content": "Hello west coast twitterati.. See you at 8 for #Belief", - "contenttype": "text/plain", - "created": 1445566144000, - "id": "657378171872874496", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night.#Belief", - "contenttype": "text/plain", - "created": 1445475948000, - "id": "656999861254918145", - "language": "en" - }, - { - "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief", - "contenttype": "text/plain", - "created": 1445475602000, - "id": "656998409933451264", - "language": "en" - }, - { - "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief", - "contenttype": "text/plain", - "created": 1445475580000, - "id": "656998320133398528", - "language": "en" - }, - { - "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief", - "contenttype": "text/plain", - "created": 1445473768000, - "id": "656990717504393216", - "language": "en" - }, - { - "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief", - "contenttype": "text/plain", - "created": 1445473150000, - "id": "656988127433637888", - "language": "en" - }, - { - "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief", - "contenttype": "text/plain", - "created": 1445473063000, - "id": "656987763644891136", - "language": "en" - }, - { - "content": "\"What my faith gives me no one can match\"#Belief", - "contenttype": "text/plain", - "created": 1445472961000, - "id": "656987336266223616", - "language": "en" - }, - { - "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief", - "contenttype": "text/plain", - "created": 1445472531000, - "id": "656985529951522816", - "language": "en" - }, - { - "content": "I'd never heard of Jain monks and nuns before doing this series. 
#Belief", - "contenttype": "text/plain", - "created": 1445472393000, - "id": "656984953037586433", - "language": "en" - }, - { - "content": "Good evening Team #Belief the Tweet is on!", - "contenttype": "text/plain", - "created": 1445472098000, - "id": "656983714883239937", - "language": "en" - }, - { - "content": "Thanks everyone for another Epic #Belief night!", - "contenttype": "text/plain", - "created": 1445302792000, - "id": "656273592485810176", - "language": "en" - }, - { - "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief", - "contenttype": "text/plain", - "created": 1445302749000, - "id": "656273415280705536", - "language": "en" - }, - { - "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief", - "contenttype": "text/plain", - "created": 1445301110000, - "id": "656266540967424000", - "language": "en" - }, - { - "content": "Do you all believe in \"soul mates\"?\n#Belief", - "contenttype": "text/plain", - "created": 1445300138000, - "id": "656262462426238976", - "language": "en" - }, - { - "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.", - "contenttype": "text/plain", - "created": 1445299749000, - "id": "656260832628756480", - "language": "en" - }, - { - "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief", - "contenttype": "text/plain", - "created": 1445299614000, - "id": "656260263604310016", - "language": "en" - }, - { - "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief", - "contenttype": "text/plain", - "created": 1445299326000, - "id": "656259057758654464", - "language": "en" - }, - { - "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026", - "contenttype": "text/plain", - "created": 1445295716000, - "id": "656243916224638976", - "language": "en" - }, - { - "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026", - "contenttype": "text/plain", - "created": 1445295702000, - "id": "656243854610337793", - "language": "en" - }, - { - "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g", - "contenttype": "text/plain", - "created": 1445295668000, - "id": "656243714507931648", - "language": "en" - }, - { - "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026", - "contenttype": "text/plain", - "created": 1445295661000, - "id": "656243684720050176", - "language": "en" - }, - { - "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. 
See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9", - "contenttype": "text/plain", - "created": 1445279425000, - "id": "656175584943341568", - "language": "en" - }, - { - "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.", - "contenttype": "text/plain", - "created": 1445275802000, - "id": "656160388526899200", - "language": "en" - }, - { - "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026", - "contenttype": "text/plain", - "created": 1445229489000, - "id": "655966138279432192", - "language": "en" - }, - { - "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah", - "contenttype": "text/plain", - "created": 1445227342000, - "id": "655957135688241152", - "language": "en" - }, - { - "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026", - "contenttype": "text/plain", - "created": 1445225935000, - "id": "655951232981295104", - "language": "en" - }, - { - "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026", - "contenttype": "text/plain", - "created": 1445225228000, - "id": "655948267868426240", - "language": "en" - }, - { - "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026", - "contenttype": "text/plain", - "created": 1445225008000, - "id": "655947345197076480", - "language": "en" - }, - { - "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026", - "contenttype": "text/plain", - "created": 1445224852000, - "id": "655946689249828864", - "language": "en" - }, - { - "content": "West Coast... Here we go. #Belief", - "contenttype": "text/plain", - "created": 1445224140000, - "id": "655943701840048128", - "language": "en" - }, - { - "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs", - "contenttype": "text/plain", - "created": 1445220694000, - "id": "655929249669378048", - "language": "en" - }, - { - "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief", - "contenttype": "text/plain", - "created": 1445215032000, - "id": "655905500056391682", - "language": "en" - }, - { - "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026", - "contenttype": "text/plain", - "created": 1445214538000, - "id": "655903432079904768", - "language": "en" - }, - { - "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV", - "contenttype": "text/plain", - "created": 1445214534000, - "id": "655903413385891840", - "language": "en" - }, - { - "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh", - "contenttype": "text/plain", - "created": 1445214502000, - "id": "655903277796732931", - "language": "en" - }, - { - "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. 
I can feel this moment was special for @ReshThakkar #Belief", - "contenttype": "text/plain", - "created": 1445214498000, - "id": "655903264374812672", - "language": "en" - }, - { - "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief", - "contenttype": "text/plain", - "created": 1445214339000, - "id": "655902594171203584", - "language": "en" - }, - { - "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY", - "contenttype": "text/plain", - "created": 1445214327000, - "id": "655902545903140864", - "language": "en" - }, - { - "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026", - "contenttype": "text/plain", - "created": 1445214128000, - "id": "655901708506103812", - "language": "en" - }, - { - "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026", - "contenttype": "text/plain", - "created": 1445213967000, - "id": "655901033952993280", - "language": "en" - }, - { - "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief", - "contenttype": "text/plain", - "created": 1445213904000, - "id": "655900772467474435", - "language": "en" - }, - { - "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it", - "contenttype": "text/plain", - "created": 1445213901000, - "id": "655900756604620800", - "language": "en" - }, - { - "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026", - "contenttype": "text/plain", - "created": 1445213721000, - "id": "655900002644987905", - "language": "en" - }, - { - "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV", - "contenttype": "text/plain", - "created": 1445213706000, - "id": "655899942242775040", - "language": "en" - }, - { - "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026", - "contenttype": "text/plain", - "created": 1445213699000, - "id": "655899910563217408", - "language": "en" - }, - { - "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC", - "contenttype": "text/plain", - "created": 1445212831000, - "id": "655896269542420480", - "language": "en" - }, - { - "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.", - "contenttype": "text/plain", - "created": 1445212747000, - "id": "655895916675600384", - "language": "en" - }, - { - "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF", - "contenttype": "text/plain", - "created": 1445212465000, - "id": "655894734968217600", - "language": "en" - }, - { - "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN", - "contenttype": "text/plain", - "created": 1445211845000, - "id": "655892134524878848", - "language": "en" - }, - { - "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026", - "contenttype": "text/plain", - "created": 1445211835000, - "id": "655892094905532416", - "language": "en" - }, - { - "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. 
Pretty sure it's about to be our favorite thing.", - "contenttype": "text/plain", - "created": 1445211833000, - "id": "655892084314890240", - "language": "en" - }, - { - "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026", - "contenttype": "text/plain", - "created": 1445209201000, - "id": "655881046102142978", - "language": "en" - }, - { - "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal", - "contenttype": "text/plain", - "created": 1445209181000, - "id": "655880959535939584", - "language": "en" - }, - { - "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026", - "contenttype": "text/plain", - "created": 1445208945000, - "id": "655879970732949504", - "language": "en" - }, - { - "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass", - "contenttype": "text/plain", - "created": 1447639154000, - "id": "666073008692314113", - "language": "en" - }, - { - "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass", - "contenttype": "text/plain", - "created": 1447638226000, - "id": "666069114889179136", - "language": "en" - }, - { - "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass", - "contenttype": "text/plain", - "created": 1447638067000, - "id": "666068446325665792", - "language": "en" - }, - { - "content": "Wow aren't you loving @TheRock and his candor? #Masterclass", - "contenttype": "text/plain", - "created": 1447637459000, - "id": "666065895932973057", - "language": "en" - }, - { - "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.", - "contenttype": "text/plain", - "created": 1447637030000, - "id": "666064097562247168", - "language": "en" - }, - { - "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass", - "contenttype": "text/plain", - "created": 1447636205000, - "id": "666060637181644800", - "language": "en" - }, - { - "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1447602477000, - "id": "665919171062927360", - "language": "en" - }, - { - "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts", - "contenttype": "text/plain", - "created": 1447098990000, - "id": "663807393063538688", - "language": "en" - }, - { - "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.", - "contenttype": "text/plain", - "created": 1446998643000, - "id": "663386507856736257", - "language": "en" - }, - { - "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K", - "contenttype": "text/plain", - "created": 1446915955000, - "id": "663039689360695296", - "language": "en" - }, - { - "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. 
A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f", - "contenttype": "text/plain", - "created": 1446881193000, - "id": "662893888080879616", - "language": "en" - }, - { - "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U", - "contenttype": "text/plain", - "created": 1446744186000, - "id": "662319239844380672", - "language": "en" - }, - { - "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass", - "contenttype": "text/plain", - "created": 1446428929000, - "id": "660996956861280256", - "language": "en" - }, - { - "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass", - "contenttype": "text/plain", - "created": 1446426630000, - "id": "660987310889041920", - "language": "en" - }, - { - "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow", - "contenttype": "text/plain", - "created": 1446220097000, - "id": "660121050978611205", - "language": "en" - }, - { - "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno", - "contenttype": "text/plain", - "created": 1446074433000, - "id": "659510090748182528", - "language": "en" - }, - { - "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE", - "contenttype": "text/plain", - "created": 1445821114000, - "id": "658447593865945089", - "language": "en" - }, - { - "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass", - "contenttype": "text/plain", - "created": 1445821003000, - "id": "658447130026188800", - "language": "en" - }, - { - "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.", - "contenttype": "text/plain", - "created": 1445820161000, - "id": "658443598313181188", - "language": "en" - }, - { - "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J", - "contenttype": "text/plain", - "created": 1445811545000, - "id": "658407457438363648", - "language": "en" - }, - { - "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026", - "contenttype": "text/plain", - "created": 1445804181000, - "id": "658376572521459712", - "language": "en" - }, - { - "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw", - "contenttype": "text/plain", - "created": 1445804072000, - "id": "658376116575449088", - "language": "en" - }, - { - "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1445734755000, - "id": "658085377140363264", - "language": "en" - }, - { - "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna", - "contenttype": "text/plain", - "created": 1445734097000, - "id": "658082618819280896", - "language": "en" - }, - { - "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... 
we all have a belie\u2026", - "contenttype": "text/plain", - "created": 1445732769000, - "id": "658077046858383360", - "language": "en" - }, - { - "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief", - "contenttype": "text/plain", - "created": 1445732579000, - "id": "658076253618991104", - "language": "en" - }, - { - "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief", - "contenttype": "text/plain", - "created": 1445731782000, - "id": "658072908237934592", - "language": "en" - }, - { - "content": "I just love Alex and his daring #Belief to live fully the present Moment.", - "contenttype": "text/plain", - "created": 1445731561000, - "id": "658071980982206464", - "language": "en" - }, - { - "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f", - "contenttype": "text/plain", - "created": 1445731248000, - "id": "658070668785770496", - "language": "en" - }, - { - "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?", - "contenttype": "text/plain", - "created": 1445731081000, - "id": "658069968534171648", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!", - "contenttype": "text/plain", - "created": 1445648630000, - "id": "657724143115202560", - "language": "en" - }, - { - "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah", - "contenttype": "text/plain", - "created": 1445647285000, - "id": "657718501147197442", - "language": "en" - }, - { - "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief", - "contenttype": "text/plain", - "created": 1445646903000, - "id": "657716901951369218", - "language": "en" - }, - { - "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief", - "contenttype": "text/plain", - "created": 1445645633000, - "id": "657711572492533760", - "language": "en" - }, - { - "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc", - "contenttype": "text/plain", - "created": 1445618531000, - "id": "657597898394173440", - "language": "en" - }, - { - "content": "Thanks All for another great night of #BELIEF", - "contenttype": "text/plain", - "created": 1445572548000, - "id": "657405031822430208", - "language": "en" - }, - { - "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026", - "contenttype": "text/plain", - "created": 1445571500000, - "id": "657400636745510912", - "language": "en" - }, - { - "content": "Ok west coast let's do it! #belief", - "contenttype": "text/plain", - "created": 1445569367000, - "id": "657391689439404033", - "language": "en" - }, - { - "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.", - "contenttype": "text/plain", - "created": 1445569296000, - "id": "657391393883619328", - "language": "en" - }, - { - "content": "Hello west coast twitterati.. 
See you at 8 for #Belief", - "contenttype": "text/plain", - "created": 1445566144000, - "id": "657378171872874496", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night.#Belief", - "contenttype": "text/plain", - "created": 1445475948000, - "id": "656999861254918145", - "language": "en" - }, - { - "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief", - "contenttype": "text/plain", - "created": 1445475602000, - "id": "656998409933451264", - "language": "en" - }, - { - "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief", - "contenttype": "text/plain", - "created": 1445475580000, - "id": "656998320133398528", - "language": "en" - }, - { - "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief", - "contenttype": "text/plain", - "created": 1445473768000, - "id": "656990717504393216", - "language": "en" - }, - { - "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief", - "contenttype": "text/plain", - "created": 1445473150000, - "id": "656988127433637888", - "language": "en" - }, - { - "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief", - "contenttype": "text/plain", - "created": 1445473063000, - "id": "656987763644891136", - "language": "en" - }, - { - "content": "\"What my faith gives me no one can match\"#Belief", - "contenttype": "text/plain", - "created": 1445472961000, - "id": "656987336266223616", - "language": "en" - }, - { - "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief", - "contenttype": "text/plain", - "created": 1445472531000, - "id": "656985529951522816", - "language": "en" - }, - { - "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief", - "contenttype": "text/plain", - "created": 1445472393000, - "id": "656984953037586433", - "language": "en" - }, - { - "content": "Good evening Team #Belief the Tweet is on!", - "contenttype": "text/plain", - "created": 1445472098000, - "id": "656983714883239937", - "language": "en" - }, - { - "content": "Thanks everyone for another Epic #Belief night!", - "contenttype": "text/plain", - "created": 1445302792000, - "id": "656273592485810176", - "language": "en" - }, - { - "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief", - "contenttype": "text/plain", - "created": 1445302749000, - "id": "656273415280705536", - "language": "en" - }, - { - "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief", - "contenttype": "text/plain", - "created": 1445301110000, - "id": "656266540967424000", - "language": "en" - }, - { - "content": "Do you all believe in \"soul mates\"?\n#Belief", - "contenttype": "text/plain", - "created": 1445300138000, - "id": "656262462426238976", - "language": "en" - }, - { - "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.", - "contenttype": "text/plain", - "created": 1445299749000, - "id": "656260832628756480", - "language": "en" - }, - { - "content": "This is one of the best love stories I've ever seen. 
#belief Ian and Larissa showing us the depths of love.#Belief", - "contenttype": "text/plain", - "created": 1445299614000, - "id": "656260263604310016", - "language": "en" - }, - { - "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief", - "contenttype": "text/plain", - "created": 1445299326000, - "id": "656259057758654464", - "language": "en" - }, - { - "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026", - "contenttype": "text/plain", - "created": 1445295716000, - "id": "656243916224638976", - "language": "en" - }, - { - "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026", - "contenttype": "text/plain", - "created": 1445295702000, - "id": "656243854610337793", - "language": "en" - }, - { - "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g", - "contenttype": "text/plain", - "created": 1445295668000, - "id": "656243714507931648", - "language": "en" - }, - { - "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026", - "contenttype": "text/plain", - "created": 1445295661000, - "id": "656243684720050176", - "language": "en" - }, - { - "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9", - "contenttype": "text/plain", - "created": 1445279425000, - "id": "656175584943341568", - "language": "en" - }, - { - "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.", - "contenttype": "text/plain", - "created": 1445275802000, - "id": "656160388526899200", - "language": "en" - }, - { - "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026", - "contenttype": "text/plain", - "created": 1445229489000, - "id": "655966138279432192", - "language": "en" - }, - { - "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah", - "contenttype": "text/plain", - "created": 1445227342000, - "id": "655957135688241152", - "language": "en" - }, - { - "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026", - "contenttype": "text/plain", - "created": 1445225935000, - "id": "655951232981295104", - "language": "en" - }, - { - "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026", - "contenttype": "text/plain", - "created": 1445225228000, - "id": "655948267868426240", - "language": "en" - }, - { - "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026", - "contenttype": "text/plain", - "created": 1445225008000, - "id": "655947345197076480", - "language": "en" - }, - { - "content": "RT @iamtikasumpter: #Belief had me in awe. 
Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026", - "contenttype": "text/plain", - "created": 1445224852000, - "id": "655946689249828864", - "language": "en" - }, - { - "content": "West Coast... Here we go. #Belief", - "contenttype": "text/plain", - "created": 1445224140000, - "id": "655943701840048128", - "language": "en" - }, - { - "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs", - "contenttype": "text/plain", - "created": 1445220694000, - "id": "655929249669378048", - "language": "en" - }, - { - "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief", - "contenttype": "text/plain", - "created": 1445215032000, - "id": "655905500056391682", - "language": "en" - }, - { - "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026", - "contenttype": "text/plain", - "created": 1445214538000, - "id": "655903432079904768", - "language": "en" - }, - { - "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV", - "contenttype": "text/plain", - "created": 1445214534000, - "id": "655903413385891840", - "language": "en" - }, - { - "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh", - "contenttype": "text/plain", - "created": 1445214502000, - "id": "655903277796732931", - "language": "en" - }, - { - "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief", - "contenttype": "text/plain", - "created": 1445214498000, - "id": "655903264374812672", - "language": "en" - }, - { - "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief", - "contenttype": "text/plain", - "created": 1445214339000, - "id": "655902594171203584", - "language": "en" - }, - { - "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY", - "contenttype": "text/plain", - "created": 1445214327000, - "id": "655902545903140864", - "language": "en" - }, - { - "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026", - "contenttype": "text/plain", - "created": 1445214128000, - "id": "655901708506103812", - "language": "en" - }, - { - "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026", - "contenttype": "text/plain", - "created": 1445213967000, - "id": "655901033952993280", - "language": "en" - }, - { - "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief", - "contenttype": "text/plain", - "created": 1445213904000, - "id": "655900772467474435", - "language": "en" - }, - { - "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it", - "contenttype": "text/plain", - "created": 1445213901000, - "id": "655900756604620800", - "language": "en" - }, - { - "content": "RT @ckerfin: On another river on the other side of the world... 
transition between stories, drawing connections to different beliefs #Belie\u2026", - "contenttype": "text/plain", - "created": 1445213721000, - "id": "655900002644987905", - "language": "en" - }, - { - "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV", - "contenttype": "text/plain", - "created": 1445213706000, - "id": "655899942242775040", - "language": "en" - }, - { - "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026", - "contenttype": "text/plain", - "created": 1445213699000, - "id": "655899910563217408", - "language": "en" - }, - { - "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC", - "contenttype": "text/plain", - "created": 1445212831000, - "id": "655896269542420480", - "language": "en" - }, - { - "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.", - "contenttype": "text/plain", - "created": 1445212747000, - "id": "655895916675600384", - "language": "en" - }, - { - "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF", - "contenttype": "text/plain", - "created": 1445212465000, - "id": "655894734968217600", - "language": "en" - }, - { - "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN", - "contenttype": "text/plain", - "created": 1445211845000, - "id": "655892134524878848", - "language": "en" - }, - { - "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026", - "contenttype": "text/plain", - "created": 1445211835000, - "id": "655892094905532416", - "language": "en" - }, - { - "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.", - "contenttype": "text/plain", - "created": 1445211833000, - "id": "655892084314890240", - "language": "en" - }, - { - "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026", - "contenttype": "text/plain", - "created": 1445209201000, - "id": "655881046102142978", - "language": "en" - }, - { - "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal", - "contenttype": "text/plain", - "created": 1445209181000, - "id": "655880959535939584", - "language": "en" - }, - { - "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026", - "contenttype": "text/plain", - "created": 1445208945000, - "id": "655879970732949504", - "language": "en" - }, - { - "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass", - "contenttype": "text/plain", - "created": 1447639154000, - "id": "666073008692314113", - "language": "en" - }, - { - "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass", - "contenttype": "text/plain", - "created": 1447638226000, - "id": "666069114889179136", - "language": "en" - }, - { - "content": ".@TheRock moving back in with your parents so humbling. 
\" on the other side of your pain is something good if you can hold on\" #masterclass", - "contenttype": "text/plain", - "created": 1447638067000, - "id": "666068446325665792", - "language": "en" - }, - { - "content": "Wow aren't you loving @TheRock and his candor? #Masterclass", - "contenttype": "text/plain", - "created": 1447637459000, - "id": "666065895932973057", - "language": "en" - }, - { - "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.", - "contenttype": "text/plain", - "created": 1447637030000, - "id": "666064097562247168", - "language": "en" - }, - { - "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass", - "contenttype": "text/plain", - "created": 1447636205000, - "id": "666060637181644800", - "language": "en" - }, - { - "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1447602477000, - "id": "665919171062927360", - "language": "en" - }, - { - "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts", - "contenttype": "text/plain", - "created": 1447098990000, - "id": "663807393063538688", - "language": "en" - }, - { - "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.", - "contenttype": "text/plain", - "created": 1446998643000, - "id": "663386507856736257", - "language": "en" - }, - { - "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K", - "contenttype": "text/plain", - "created": 1446915955000, - "id": "663039689360695296", - "language": "en" - }, - { - "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f", - "contenttype": "text/plain", - "created": 1446881193000, - "id": "662893888080879616", - "language": "en" - }, - { - "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U", - "contenttype": "text/plain", - "created": 1446744186000, - "id": "662319239844380672", - "language": "en" - }, - { - "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass", - "contenttype": "text/plain", - "created": 1446428929000, - "id": "660996956861280256", - "language": "en" - }, - { - "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass", - "contenttype": "text/plain", - "created": 1446426630000, - "id": "660987310889041920", - "language": "en" - }, - { - "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow", - "contenttype": "text/plain", - "created": 1446220097000, - "id": "660121050978611205", - "language": "en" - }, - { - "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! 
https:\/\/t.co\/iFMnpRAsno", - "contenttype": "text/plain", - "created": 1446074433000, - "id": "659510090748182528", - "language": "en" - }, - { - "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE", - "contenttype": "text/plain", - "created": 1445821114000, - "id": "658447593865945089", - "language": "en" - }, - { - "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass", - "contenttype": "text/plain", - "created": 1445821003000, - "id": "658447130026188800", - "language": "en" - }, - { - "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.", - "contenttype": "text/plain", - "created": 1445820161000, - "id": "658443598313181188", - "language": "en" - }, - { - "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J", - "contenttype": "text/plain", - "created": 1445811545000, - "id": "658407457438363648", - "language": "en" - }, - { - "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026", - "contenttype": "text/plain", - "created": 1445804181000, - "id": "658376572521459712", - "language": "en" - }, - { - "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw", - "contenttype": "text/plain", - "created": 1445804072000, - "id": "658376116575449088", - "language": "en" - }, - { - "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1445734755000, - "id": "658085377140363264", - "language": "en" - }, - { - "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna", - "contenttype": "text/plain", - "created": 1445734097000, - "id": "658082618819280896", - "language": "en" - }, - { - "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026", - "contenttype": "text/plain", - "created": 1445732769000, - "id": "658077046858383360", - "language": "en" - }, - { - "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief", - "contenttype": "text/plain", - "created": 1445732579000, - "id": "658076253618991104", - "language": "en" - }, - { - "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief", - "contenttype": "text/plain", - "created": 1445731782000, - "id": "658072908237934592", - "language": "en" - }, - { - "content": "I just love Alex and his daring #Belief to live fully the present Moment.", - "contenttype": "text/plain", - "created": 1445731561000, - "id": "658071980982206464", - "language": "en" - }, - { - "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f", - "contenttype": "text/plain", - "created": 1445731248000, - "id": "658070668785770496", - "language": "en" - }, - { - "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? 
What do you Believe?", - "contenttype": "text/plain", - "created": 1445731081000, - "id": "658069968534171648", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!", - "contenttype": "text/plain", - "created": 1445648630000, - "id": "657724143115202560", - "language": "en" - }, - { - "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah", - "contenttype": "text/plain", - "created": 1445647285000, - "id": "657718501147197442", - "language": "en" - }, - { - "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief", - "contenttype": "text/plain", - "created": 1445646903000, - "id": "657716901951369218", - "language": "en" - }, - { - "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief", - "contenttype": "text/plain", - "created": 1445645633000, - "id": "657711572492533760", - "language": "en" - }, - { - "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc", - "contenttype": "text/plain", - "created": 1445618531000, - "id": "657597898394173440", - "language": "en" - }, - { - "content": "Thanks All for another great night of #BELIEF", - "contenttype": "text/plain", - "created": 1445572548000, - "id": "657405031822430208", - "language": "en" - }, - { - "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026", - "contenttype": "text/plain", - "created": 1445571500000, - "id": "657400636745510912", - "language": "en" - }, - { - "content": "Ok west coast let's do it! #belief", - "contenttype": "text/plain", - "created": 1445569367000, - "id": "657391689439404033", - "language": "en" - }, - { - "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.", - "contenttype": "text/plain", - "created": 1445569296000, - "id": "657391393883619328", - "language": "en" - }, - { - "content": "Hello west coast twitterati.. See you at 8 for #Belief", - "contenttype": "text/plain", - "created": 1445566144000, - "id": "657378171872874496", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night.#Belief", - "contenttype": "text/plain", - "created": 1445475948000, - "id": "656999861254918145", - "language": "en" - }, - { - "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief", - "contenttype": "text/plain", - "created": 1445475602000, - "id": "656998409933451264", - "language": "en" - }, - { - "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief", - "contenttype": "text/plain", - "created": 1445475580000, - "id": "656998320133398528", - "language": "en" - }, - { - "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief", - "contenttype": "text/plain", - "created": 1445473768000, - "id": "656990717504393216", - "language": "en" - }, - { - "content": "Notice her mother crying. 
She knows its last tine she will ever see her daughter.#Belief", - "contenttype": "text/plain", - "created": 1445473150000, - "id": "656988127433637888", - "language": "en" - }, - { - "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief", - "contenttype": "text/plain", - "created": 1445473063000, - "id": "656987763644891136", - "language": "en" - }, - { - "content": "\"What my faith gives me no one can match\"#Belief", - "contenttype": "text/plain", - "created": 1445472961000, - "id": "656987336266223616", - "language": "en" - }, - { - "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief", - "contenttype": "text/plain", - "created": 1445472531000, - "id": "656985529951522816", - "language": "en" - }, - { - "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief", - "contenttype": "text/plain", - "created": 1445472393000, - "id": "656984953037586433", - "language": "en" - }, - { - "content": "Good evening Team #Belief the Tweet is on!", - "contenttype": "text/plain", - "created": 1445472098000, - "id": "656983714883239937", - "language": "en" - }, - { - "content": "Thanks everyone for another Epic #Belief night!", - "contenttype": "text/plain", - "created": 1445302792000, - "id": "656273592485810176", - "language": "en" - }, - { - "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief", - "contenttype": "text/plain", - "created": 1445302749000, - "id": "656273415280705536", - "language": "en" - }, - { - "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief", - "contenttype": "text/plain", - "created": 1445301110000, - "id": "656266540967424000", - "language": "en" - }, - { - "content": "Do you all believe in \"soul mates\"?\n#Belief", - "contenttype": "text/plain", - "created": 1445300138000, - "id": "656262462426238976", - "language": "en" - }, - { - "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.", - "contenttype": "text/plain", - "created": 1445299749000, - "id": "656260832628756480", - "language": "en" - }, - { - "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief", - "contenttype": "text/plain", - "created": 1445299614000, - "id": "656260263604310016", - "language": "en" - }, - { - "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief", - "contenttype": "text/plain", - "created": 1445299326000, - "id": "656259057758654464", - "language": "en" - }, - { - "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026", - "contenttype": "text/plain", - "created": 1445295716000, - "id": "656243916224638976", - "language": "en" - }, - { - "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026", - "contenttype": "text/plain", - "created": 1445295702000, - "id": "656243854610337793", - "language": "en" - }, - { - "content": "RT @OWNTV: See the world through someone else\u2019s soul. 
The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g", - "contenttype": "text/plain", - "created": 1445295668000, - "id": "656243714507931648", - "language": "en" - }, - { - "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026", - "contenttype": "text/plain", - "created": 1445295661000, - "id": "656243684720050176", - "language": "en" - }, - { - "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9", - "contenttype": "text/plain", - "created": 1445279425000, - "id": "656175584943341568", - "language": "en" - }, - { - "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.", - "contenttype": "text/plain", - "created": 1445275802000, - "id": "656160388526899200", - "language": "en" - }, - { - "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026", - "contenttype": "text/plain", - "created": 1445229489000, - "id": "655966138279432192", - "language": "en" - }, - { - "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah", - "contenttype": "text/plain", - "created": 1445227342000, - "id": "655957135688241152", - "language": "en" - }, - { - "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026", - "contenttype": "text/plain", - "created": 1445225935000, - "id": "655951232981295104", - "language": "en" - }, - { - "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026", - "contenttype": "text/plain", - "created": 1445225228000, - "id": "655948267868426240", - "language": "en" - }, - { - "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026", - "contenttype": "text/plain", - "created": 1445225008000, - "id": "655947345197076480", - "language": "en" - }, - { - "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026", - "contenttype": "text/plain", - "created": 1445224852000, - "id": "655946689249828864", - "language": "en" - }, - { - "content": "West Coast... Here we go. #Belief", - "contenttype": "text/plain", - "created": 1445224140000, - "id": "655943701840048128", - "language": "en" - }, - { - "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs", - "contenttype": "text/plain", - "created": 1445220694000, - "id": "655929249669378048", - "language": "en" - }, - { - "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief", - "contenttype": "text/plain", - "created": 1445215032000, - "id": "655905500056391682", - "language": "en" - }, - { - "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. 
#Bel\u2026", - "contenttype": "text/plain", - "created": 1445214538000, - "id": "655903432079904768", - "language": "en" - }, - { - "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV", - "contenttype": "text/plain", - "created": 1445214534000, - "id": "655903413385891840", - "language": "en" - }, - { - "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh", - "contenttype": "text/plain", - "created": 1445214502000, - "id": "655903277796732931", - "language": "en" - }, - { - "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief", - "contenttype": "text/plain", - "created": 1445214498000, - "id": "655903264374812672", - "language": "en" - }, - { - "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief", - "contenttype": "text/plain", - "created": 1445214339000, - "id": "655902594171203584", - "language": "en" - }, - { - "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY", - "contenttype": "text/plain", - "created": 1445214327000, - "id": "655902545903140864", - "language": "en" - }, - { - "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026", - "contenttype": "text/plain", - "created": 1445214128000, - "id": "655901708506103812", - "language": "en" - }, - { - "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026", - "contenttype": "text/plain", - "created": 1445213967000, - "id": "655901033952993280", - "language": "en" - }, - { - "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief", - "contenttype": "text/plain", - "created": 1445213904000, - "id": "655900772467474435", - "language": "en" - }, - { - "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it", - "contenttype": "text/plain", - "created": 1445213901000, - "id": "655900756604620800", - "language": "en" - }, - { - "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026", - "contenttype": "text/plain", - "created": 1445213721000, - "id": "655900002644987905", - "language": "en" - }, - { - "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV", - "contenttype": "text/plain", - "created": 1445213706000, - "id": "655899942242775040", - "language": "en" - }, - { - "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026", - "contenttype": "text/plain", - "created": 1445213699000, - "id": "655899910563217408", - "language": "en" - }, - { - "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC", - "contenttype": "text/plain", - "created": 1445212831000, - "id": "655896269542420480", - "language": "en" - }, - { - "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.", - "contenttype": "text/plain", - "created": 1445212747000, - "id": "655895916675600384", - "language": "en" - }, - { - "content": "RT @GrowingOWN: 7 minutes, y'all! 
#BELIEF", - "contenttype": "text/plain", - "created": 1445212465000, - "id": "655894734968217600", - "language": "en" - }, - { - "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN", - "contenttype": "text/plain", - "created": 1445211845000, - "id": "655892134524878848", - "language": "en" - }, - { - "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026", - "contenttype": "text/plain", - "created": 1445211835000, - "id": "655892094905532416", - "language": "en" - }, - { - "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.", - "contenttype": "text/plain", - "created": 1445211833000, - "id": "655892084314890240", - "language": "en" - }, - { - "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026", - "contenttype": "text/plain", - "created": 1445209201000, - "id": "655881046102142978", - "language": "en" - }, - { - "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal", - "contenttype": "text/plain", - "created": 1445209181000, - "id": "655880959535939584", - "language": "en" - }, - { - "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026", - "contenttype": "text/plain", - "created": 1445208945000, - "id": "655879970732949504", - "language": "en" - }, - { - "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass", - "contenttype": "text/plain", - "created": 1447639154000, - "id": "666073008692314113", - "language": "en" - }, - { - "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass", - "contenttype": "text/plain", - "created": 1447638226000, - "id": "666069114889179136", - "language": "en" - }, - { - "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass", - "contenttype": "text/plain", - "created": 1447638067000, - "id": "666068446325665792", - "language": "en" - }, - { - "content": "Wow aren't you loving @TheRock and his candor? #Masterclass", - "contenttype": "text/plain", - "created": 1447637459000, - "id": "666065895932973057", - "language": "en" - }, - { - "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.", - "contenttype": "text/plain", - "created": 1447637030000, - "id": "666064097562247168", - "language": "en" - }, - { - "content": "\"Be You\".. 
That's the best advice ever @TheRock #MastersClass", - "contenttype": "text/plain", - "created": 1447636205000, - "id": "666060637181644800", - "language": "en" - }, - { - "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1447602477000, - "id": "665919171062927360", - "language": "en" - }, - { - "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts", - "contenttype": "text/plain", - "created": 1447098990000, - "id": "663807393063538688", - "language": "en" - }, - { - "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.", - "contenttype": "text/plain", - "created": 1446998643000, - "id": "663386507856736257", - "language": "en" - }, - { - "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K", - "contenttype": "text/plain", - "created": 1446915955000, - "id": "663039689360695296", - "language": "en" - }, - { - "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f", - "contenttype": "text/plain", - "created": 1446881193000, - "id": "662893888080879616", - "language": "en" - }, - { - "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U", - "contenttype": "text/plain", - "created": 1446744186000, - "id": "662319239844380672", - "language": "en" - }, - { - "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass", - "contenttype": "text/plain", - "created": 1446428929000, - "id": "660996956861280256", - "language": "en" - }, - { - "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass", - "contenttype": "text/plain", - "created": 1446426630000, - "id": "660987310889041920", - "language": "en" - }, - { - "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow", - "contenttype": "text/plain", - "created": 1446220097000, - "id": "660121050978611205", - "language": "en" - }, - { - "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno", - "contenttype": "text/plain", - "created": 1446074433000, - "id": "659510090748182528", - "language": "en" - }, - { - "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE", - "contenttype": "text/plain", - "created": 1445821114000, - "id": "658447593865945089", - "language": "en" - }, - { - "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass", - "contenttype": "text/plain", - "created": 1445821003000, - "id": "658447130026188800", - "language": "en" - }, - { - "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.", - "contenttype": "text/plain", - "created": 1445820161000, - "id": "658443598313181188", - "language": "en" - }, - { - "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. 
#SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J", - "contenttype": "text/plain", - "created": 1445811545000, - "id": "658407457438363648", - "language": "en" - }, - { - "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026", - "contenttype": "text/plain", - "created": 1445804181000, - "id": "658376572521459712", - "language": "en" - }, - { - "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw", - "contenttype": "text/plain", - "created": 1445804072000, - "id": "658376116575449088", - "language": "en" - }, - { - "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1445734755000, - "id": "658085377140363264", - "language": "en" - }, - { - "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna", - "contenttype": "text/plain", - "created": 1445734097000, - "id": "658082618819280896", - "language": "en" - }, - { - "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026", - "contenttype": "text/plain", - "created": 1445732769000, - "id": "658077046858383360", - "language": "en" - }, - { - "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief", - "contenttype": "text/plain", - "created": 1445732579000, - "id": "658076253618991104", - "language": "en" - }, - { - "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief", - "contenttype": "text/plain", - "created": 1445731782000, - "id": "658072908237934592", - "language": "en" - }, - { - "content": "I just love Alex and his daring #Belief to live fully the present Moment.", - "contenttype": "text/plain", - "created": 1445731561000, - "id": "658071980982206464", - "language": "en" - }, - { - "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f", - "contenttype": "text/plain", - "created": 1445731248000, - "id": "658070668785770496", - "language": "en" - }, - { - "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?", - "contenttype": "text/plain", - "created": 1445731081000, - "id": "658069968534171648", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!", - "contenttype": "text/plain", - "created": 1445648630000, - "id": "657724143115202560", - "language": "en" - }, - { - "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah", - "contenttype": "text/plain", - "created": 1445647285000, - "id": "657718501147197442", - "language": "en" - }, - { - "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief", - "contenttype": "text/plain", - "created": 1445646903000, - "id": "657716901951369218", - "language": "en" - }, - { - "content": "Hey Belief team mates can yo believe how quickly the week has passed? 
#Belief", - "contenttype": "text/plain", - "created": 1445645633000, - "id": "657711572492533760", - "language": "en" - }, - { - "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc", - "contenttype": "text/plain", - "created": 1445618531000, - "id": "657597898394173440", - "language": "en" - }, - { - "content": "Thanks All for another great night of #BELIEF", - "contenttype": "text/plain", - "created": 1445572548000, - "id": "657405031822430208", - "language": "en" - }, - { - "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026", - "contenttype": "text/plain", - "created": 1445571500000, - "id": "657400636745510912", - "language": "en" - }, - { - "content": "Ok west coast let's do it! #belief", - "contenttype": "text/plain", - "created": 1445569367000, - "id": "657391689439404033", - "language": "en" - }, - { - "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.", - "contenttype": "text/plain", - "created": 1445569296000, - "id": "657391393883619328", - "language": "en" - }, - { - "content": "Hello west coast twitterati.. See you at 8 for #Belief", - "contenttype": "text/plain", - "created": 1445566144000, - "id": "657378171872874496", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night.#Belief", - "contenttype": "text/plain", - "created": 1445475948000, - "id": "656999861254918145", - "language": "en" - }, - { - "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief", - "contenttype": "text/plain", - "created": 1445475602000, - "id": "656998409933451264", - "language": "en" - }, - { - "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief", - "contenttype": "text/plain", - "created": 1445475580000, - "id": "656998320133398528", - "language": "en" - }, - { - "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief", - "contenttype": "text/plain", - "created": 1445473768000, - "id": "656990717504393216", - "language": "en" - }, - { - "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief", - "contenttype": "text/plain", - "created": 1445473150000, - "id": "656988127433637888", - "language": "en" - }, - { - "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief", - "contenttype": "text/plain", - "created": 1445473063000, - "id": "656987763644891136", - "language": "en" - }, - { - "content": "\"What my faith gives me no one can match\"#Belief", - "contenttype": "text/plain", - "created": 1445472961000, - "id": "656987336266223616", - "language": "en" - }, - { - "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief", - "contenttype": "text/plain", - "created": 1445472531000, - "id": "656985529951522816", - "language": "en" - }, - { - "content": "I'd never heard of Jain monks and nuns before doing this series. 
#Belief", - "contenttype": "text/plain", - "created": 1445472393000, - "id": "656984953037586433", - "language": "en" - }, - { - "content": "Good evening Team #Belief the Tweet is on!", - "contenttype": "text/plain", - "created": 1445472098000, - "id": "656983714883239937", - "language": "en" - }, - { - "content": "Thanks everyone for another Epic #Belief night!", - "contenttype": "text/plain", - "created": 1445302792000, - "id": "656273592485810176", - "language": "en" - }, - { - "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief", - "contenttype": "text/plain", - "created": 1445302749000, - "id": "656273415280705536", - "language": "en" - }, - { - "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief", - "contenttype": "text/plain", - "created": 1445301110000, - "id": "656266540967424000", - "language": "en" - }, - { - "content": "Do you all believe in \"soul mates\"?\n#Belief", - "contenttype": "text/plain", - "created": 1445300138000, - "id": "656262462426238976", - "language": "en" - }, - { - "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.", - "contenttype": "text/plain", - "created": 1445299749000, - "id": "656260832628756480", - "language": "en" - }, - { - "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief", - "contenttype": "text/plain", - "created": 1445299614000, - "id": "656260263604310016", - "language": "en" - }, - { - "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief", - "contenttype": "text/plain", - "created": 1445299326000, - "id": "656259057758654464", - "language": "en" - }, - { - "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026", - "contenttype": "text/plain", - "created": 1445295716000, - "id": "656243916224638976", - "language": "en" - }, - { - "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026", - "contenttype": "text/plain", - "created": 1445295702000, - "id": "656243854610337793", - "language": "en" - }, - { - "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g", - "contenttype": "text/plain", - "created": 1445295668000, - "id": "656243714507931648", - "language": "en" - }, - { - "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026", - "contenttype": "text/plain", - "created": 1445295661000, - "id": "656243684720050176", - "language": "en" - }, - { - "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. 
See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9", - "contenttype": "text/plain", - "created": 1445279425000, - "id": "656175584943341568", - "language": "en" - }, - { - "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.", - "contenttype": "text/plain", - "created": 1445275802000, - "id": "656160388526899200", - "language": "en" - }, - { - "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026", - "contenttype": "text/plain", - "created": 1445229489000, - "id": "655966138279432192", - "language": "en" - }, - { - "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah", - "contenttype": "text/plain", - "created": 1445227342000, - "id": "655957135688241152", - "language": "en" - }, - { - "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026", - "contenttype": "text/plain", - "created": 1445225935000, - "id": "655951232981295104", - "language": "en" - }, - { - "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026", - "contenttype": "text/plain", - "created": 1445225228000, - "id": "655948267868426240", - "language": "en" - }, - { - "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026", - "contenttype": "text/plain", - "created": 1445225008000, - "id": "655947345197076480", - "language": "en" - }, - { - "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026", - "contenttype": "text/plain", - "created": 1445224852000, - "id": "655946689249828864", - "language": "en" - }, - { - "content": "West Coast... Here we go. #Belief", - "contenttype": "text/plain", - "created": 1445224140000, - "id": "655943701840048128", - "language": "en" - }, - { - "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs", - "contenttype": "text/plain", - "created": 1445220694000, - "id": "655929249669378048", - "language": "en" - }, - { - "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief", - "contenttype": "text/plain", - "created": 1445215032000, - "id": "655905500056391682", - "language": "en" - }, - { - "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026", - "contenttype": "text/plain", - "created": 1445214538000, - "id": "655903432079904768", - "language": "en" - }, - { - "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV", - "contenttype": "text/plain", - "created": 1445214534000, - "id": "655903413385891840", - "language": "en" - }, - { - "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh", - "contenttype": "text/plain", - "created": 1445214502000, - "id": "655903277796732931", - "language": "en" - }, - { - "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. 
I can feel this moment was special for @ReshThakkar #Belief", - "contenttype": "text/plain", - "created": 1445214498000, - "id": "655903264374812672", - "language": "en" - }, - { - "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief", - "contenttype": "text/plain", - "created": 1445214339000, - "id": "655902594171203584", - "language": "en" - }, - { - "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY", - "contenttype": "text/plain", - "created": 1445214327000, - "id": "655902545903140864", - "language": "en" - }, - { - "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026", - "contenttype": "text/plain", - "created": 1445214128000, - "id": "655901708506103812", - "language": "en" - }, - { - "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026", - "contenttype": "text/plain", - "created": 1445213967000, - "id": "655901033952993280", - "language": "en" - }, - { - "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief", - "contenttype": "text/plain", - "created": 1445213904000, - "id": "655900772467474435", - "language": "en" - }, - { - "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it", - "contenttype": "text/plain", - "created": 1445213901000, - "id": "655900756604620800", - "language": "en" - }, - { - "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026", - "contenttype": "text/plain", - "created": 1445213721000, - "id": "655900002644987905", - "language": "en" - }, - { - "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV", - "contenttype": "text/plain", - "created": 1445213706000, - "id": "655899942242775040", - "language": "en" - }, - { - "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026", - "contenttype": "text/plain", - "created": 1445213699000, - "id": "655899910563217408", - "language": "en" - }, - { - "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC", - "contenttype": "text/plain", - "created": 1445212831000, - "id": "655896269542420480", - "language": "en" - }, - { - "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.", - "contenttype": "text/plain", - "created": 1445212747000, - "id": "655895916675600384", - "language": "en" - }, - { - "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF", - "contenttype": "text/plain", - "created": 1445212465000, - "id": "655894734968217600", - "language": "en" - }, - { - "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN", - "contenttype": "text/plain", - "created": 1445211845000, - "id": "655892134524878848", - "language": "en" - }, - { - "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026", - "contenttype": "text/plain", - "created": 1445211835000, - "id": "655892094905532416", - "language": "en" - }, - { - "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. 
Pretty sure it's about to be our favorite thing.", - "contenttype": "text/plain", - "created": 1445211833000, - "id": "655892084314890240", - "language": "en" - }, - { - "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026", - "contenttype": "text/plain", - "created": 1445209201000, - "id": "655881046102142978", - "language": "en" - }, - { - "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal", - "contenttype": "text/plain", - "created": 1445209181000, - "id": "655880959535939584", - "language": "en" - }, - { - "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026", - "contenttype": "text/plain", - "created": 1445208945000, - "id": "655879970732949504", - "language": "en" - }, - { - "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass", - "contenttype": "text/plain", - "created": 1447639154000, - "id": "666073008692314113", - "language": "en" - }, - { - "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass", - "contenttype": "text/plain", - "created": 1447638226000, - "id": "666069114889179136", - "language": "en" - }, - { - "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass", - "contenttype": "text/plain", - "created": 1447638067000, - "id": "666068446325665792", - "language": "en" - }, - { - "content": "Wow aren't you loving @TheRock and his candor? #Masterclass", - "contenttype": "text/plain", - "created": 1447637459000, - "id": "666065895932973057", - "language": "en" - }, - { - "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.", - "contenttype": "text/plain", - "created": 1447637030000, - "id": "666064097562247168", - "language": "en" - }, - { - "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass", - "contenttype": "text/plain", - "created": 1447636205000, - "id": "666060637181644800", - "language": "en" - }, - { - "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1447602477000, - "id": "665919171062927360", - "language": "en" - }, - { - "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts", - "contenttype": "text/plain", - "created": 1447098990000, - "id": "663807393063538688", - "language": "en" - }, - { - "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.", - "contenttype": "text/plain", - "created": 1446998643000, - "id": "663386507856736257", - "language": "en" - }, - { - "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K", - "contenttype": "text/plain", - "created": 1446915955000, - "id": "663039689360695296", - "language": "en" - }, - { - "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. 
A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f", - "contenttype": "text/plain", - "created": 1446881193000, - "id": "662893888080879616", - "language": "en" - }, - { - "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U", - "contenttype": "text/plain", - "created": 1446744186000, - "id": "662319239844380672", - "language": "en" - }, - { - "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass", - "contenttype": "text/plain", - "created": 1446428929000, - "id": "660996956861280256", - "language": "en" - }, - { - "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass", - "contenttype": "text/plain", - "created": 1446426630000, - "id": "660987310889041920", - "language": "en" - }, - { - "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow", - "contenttype": "text/plain", - "created": 1446220097000, - "id": "660121050978611205", - "language": "en" - }, - { - "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno", - "contenttype": "text/plain", - "created": 1446074433000, - "id": "659510090748182528", - "language": "en" - }, - { - "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE", - "contenttype": "text/plain", - "created": 1445821114000, - "id": "658447593865945089", - "language": "en" - }, - { - "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass", - "contenttype": "text/plain", - "created": 1445821003000, - "id": "658447130026188800", - "language": "en" - }, - { - "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.", - "contenttype": "text/plain", - "created": 1445820161000, - "id": "658443598313181188", - "language": "en" - }, - { - "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J", - "contenttype": "text/plain", - "created": 1445811545000, - "id": "658407457438363648", - "language": "en" - }, - { - "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026", - "contenttype": "text/plain", - "created": 1445804181000, - "id": "658376572521459712", - "language": "en" - }, - { - "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw", - "contenttype": "text/plain", - "created": 1445804072000, - "id": "658376116575449088", - "language": "en" - }, - { - "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1445734755000, - "id": "658085377140363264", - "language": "en" - }, - { - "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna", - "contenttype": "text/plain", - "created": 1445734097000, - "id": "658082618819280896", - "language": "en" - }, - { - "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... 
we all have a belie\u2026", - "contenttype": "text/plain", - "created": 1445732769000, - "id": "658077046858383360", - "language": "en" - }, - { - "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief", - "contenttype": "text/plain", - "created": 1445732579000, - "id": "658076253618991104", - "language": "en" - }, - { - "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief", - "contenttype": "text/plain", - "created": 1445731782000, - "id": "658072908237934592", - "language": "en" - }, - { - "content": "I just love Alex and his daring #Belief to live fully the present Moment.", - "contenttype": "text/plain", - "created": 1445731561000, - "id": "658071980982206464", - "language": "en" - }, - { - "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f", - "contenttype": "text/plain", - "created": 1445731248000, - "id": "658070668785770496", - "language": "en" - }, - { - "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?", - "contenttype": "text/plain", - "created": 1445731081000, - "id": "658069968534171648", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!", - "contenttype": "text/plain", - "created": 1445648630000, - "id": "657724143115202560", - "language": "en" - }, - { - "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah", - "contenttype": "text/plain", - "created": 1445647285000, - "id": "657718501147197442", - "language": "en" - }, - { - "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief", - "contenttype": "text/plain", - "created": 1445646903000, - "id": "657716901951369218", - "language": "en" - }, - { - "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief", - "contenttype": "text/plain", - "created": 1445645633000, - "id": "657711572492533760", - "language": "en" - }, - { - "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc", - "contenttype": "text/plain", - "created": 1445618531000, - "id": "657597898394173440", - "language": "en" - }, - { - "content": "Thanks All for another great night of #BELIEF", - "contenttype": "text/plain", - "created": 1445572548000, - "id": "657405031822430208", - "language": "en" - }, - { - "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026", - "contenttype": "text/plain", - "created": 1445571500000, - "id": "657400636745510912", - "language": "en" - }, - { - "content": "Ok west coast let's do it! #belief", - "contenttype": "text/plain", - "created": 1445569367000, - "id": "657391689439404033", - "language": "en" - }, - { - "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.", - "contenttype": "text/plain", - "created": 1445569296000, - "id": "657391393883619328", - "language": "en" - }, - { - "content": "Hello west coast twitterati.. 
See you at 8 for #Belief", - "contenttype": "text/plain", - "created": 1445566144000, - "id": "657378171872874496", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night.#Belief", - "contenttype": "text/plain", - "created": 1445475948000, - "id": "656999861254918145", - "language": "en" - }, - { - "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief", - "contenttype": "text/plain", - "created": 1445475602000, - "id": "656998409933451264", - "language": "en" - }, - { - "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief", - "contenttype": "text/plain", - "created": 1445475580000, - "id": "656998320133398528", - "language": "en" - }, - { - "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief", - "contenttype": "text/plain", - "created": 1445473768000, - "id": "656990717504393216", - "language": "en" - }, - { - "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief", - "contenttype": "text/plain", - "created": 1445473150000, - "id": "656988127433637888", - "language": "en" - }, - { - "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief", - "contenttype": "text/plain", - "created": 1445473063000, - "id": "656987763644891136", - "language": "en" - }, - { - "content": "\"What my faith gives me no one can match\"#Belief", - "contenttype": "text/plain", - "created": 1445472961000, - "id": "656987336266223616", - "language": "en" - }, - { - "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief", - "contenttype": "text/plain", - "created": 1445472531000, - "id": "656985529951522816", - "language": "en" - }, - { - "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief", - "contenttype": "text/plain", - "created": 1445472393000, - "id": "656984953037586433", - "language": "en" - }, - { - "content": "Good evening Team #Belief the Tweet is on!", - "contenttype": "text/plain", - "created": 1445472098000, - "id": "656983714883239937", - "language": "en" - }, - { - "content": "Thanks everyone for another Epic #Belief night!", - "contenttype": "text/plain", - "created": 1445302792000, - "id": "656273592485810176", - "language": "en" - }, - { - "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief", - "contenttype": "text/plain", - "created": 1445302749000, - "id": "656273415280705536", - "language": "en" - }, - { - "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief", - "contenttype": "text/plain", - "created": 1445301110000, - "id": "656266540967424000", - "language": "en" - }, - { - "content": "Do you all believe in \"soul mates\"?\n#Belief", - "contenttype": "text/plain", - "created": 1445300138000, - "id": "656262462426238976", - "language": "en" - }, - { - "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.", - "contenttype": "text/plain", - "created": 1445299749000, - "id": "656260832628756480", - "language": "en" - }, - { - "content": "This is one of the best love stories I've ever seen. 
#belief Ian and Larissa showing us the depths of love.#Belief", - "contenttype": "text/plain", - "created": 1445299614000, - "id": "656260263604310016", - "language": "en" - }, - { - "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief", - "contenttype": "text/plain", - "created": 1445299326000, - "id": "656259057758654464", - "language": "en" - }, - { - "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026", - "contenttype": "text/plain", - "created": 1445295716000, - "id": "656243916224638976", - "language": "en" - }, - { - "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026", - "contenttype": "text/plain", - "created": 1445295702000, - "id": "656243854610337793", - "language": "en" - }, - { - "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g", - "contenttype": "text/plain", - "created": 1445295668000, - "id": "656243714507931648", - "language": "en" - }, - { - "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026", - "contenttype": "text/plain", - "created": 1445295661000, - "id": "656243684720050176", - "language": "en" - }, - { - "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9", - "contenttype": "text/plain", - "created": 1445279425000, - "id": "656175584943341568", - "language": "en" - }, - { - "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.", - "contenttype": "text/plain", - "created": 1445275802000, - "id": "656160388526899200", - "language": "en" - }, - { - "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026", - "contenttype": "text/plain", - "created": 1445229489000, - "id": "655966138279432192", - "language": "en" - }, - { - "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah", - "contenttype": "text/plain", - "created": 1445227342000, - "id": "655957135688241152", - "language": "en" - }, - { - "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026", - "contenttype": "text/plain", - "created": 1445225935000, - "id": "655951232981295104", - "language": "en" - }, - { - "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026", - "contenttype": "text/plain", - "created": 1445225228000, - "id": "655948267868426240", - "language": "en" - }, - { - "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026", - "contenttype": "text/plain", - "created": 1445225008000, - "id": "655947345197076480", - "language": "en" - }, - { - "content": "RT @iamtikasumpter: #Belief had me in awe. 
Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026", - "contenttype": "text/plain", - "created": 1445224852000, - "id": "655946689249828864", - "language": "en" - }, - { - "content": "West Coast... Here we go. #Belief", - "contenttype": "text/plain", - "created": 1445224140000, - "id": "655943701840048128", - "language": "en" - }, - { - "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs", - "contenttype": "text/plain", - "created": 1445220694000, - "id": "655929249669378048", - "language": "en" - }, - { - "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief", - "contenttype": "text/plain", - "created": 1445215032000, - "id": "655905500056391682", - "language": "en" - }, - { - "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026", - "contenttype": "text/plain", - "created": 1445214538000, - "id": "655903432079904768", - "language": "en" - }, - { - "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV", - "contenttype": "text/plain", - "created": 1445214534000, - "id": "655903413385891840", - "language": "en" - }, - { - "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh", - "contenttype": "text/plain", - "created": 1445214502000, - "id": "655903277796732931", - "language": "en" - }, - { - "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief", - "contenttype": "text/plain", - "created": 1445214498000, - "id": "655903264374812672", - "language": "en" - }, - { - "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief", - "contenttype": "text/plain", - "created": 1445214339000, - "id": "655902594171203584", - "language": "en" - }, - { - "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY", - "contenttype": "text/plain", - "created": 1445214327000, - "id": "655902545903140864", - "language": "en" - }, - { - "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026", - "contenttype": "text/plain", - "created": 1445214128000, - "id": "655901708506103812", - "language": "en" - }, - { - "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026", - "contenttype": "text/plain", - "created": 1445213967000, - "id": "655901033952993280", - "language": "en" - }, - { - "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief", - "contenttype": "text/plain", - "created": 1445213904000, - "id": "655900772467474435", - "language": "en" - }, - { - "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it", - "contenttype": "text/plain", - "created": 1445213901000, - "id": "655900756604620800", - "language": "en" - }, - { - "content": "RT @ckerfin: On another river on the other side of the world... 
transition between stories, drawing connections to different beliefs #Belie\u2026", - "contenttype": "text/plain", - "created": 1445213721000, - "id": "655900002644987905", - "language": "en" - }, - { - "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV", - "contenttype": "text/plain", - "created": 1445213706000, - "id": "655899942242775040", - "language": "en" - }, - { - "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026", - "contenttype": "text/plain", - "created": 1445213699000, - "id": "655899910563217408", - "language": "en" - }, - { - "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC", - "contenttype": "text/plain", - "created": 1445212831000, - "id": "655896269542420480", - "language": "en" - }, - { - "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.", - "contenttype": "text/plain", - "created": 1445212747000, - "id": "655895916675600384", - "language": "en" - }, - { - "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF", - "contenttype": "text/plain", - "created": 1445212465000, - "id": "655894734968217600", - "language": "en" - }, - { - "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN", - "contenttype": "text/plain", - "created": 1445211845000, - "id": "655892134524878848", - "language": "en" - }, - { - "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026", - "contenttype": "text/plain", - "created": 1445211835000, - "id": "655892094905532416", - "language": "en" - }, - { - "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.", - "contenttype": "text/plain", - "created": 1445211833000, - "id": "655892084314890240", - "language": "en" - }, - { - "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026", - "contenttype": "text/plain", - "created": 1445209201000, - "id": "655881046102142978", - "language": "en" - }, - { - "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal", - "contenttype": "text/plain", - "created": 1445209181000, - "id": "655880959535939584", - "language": "en" - }, - { - "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026", - "contenttype": "text/plain", - "created": 1445208945000, - "id": "655879970732949504", - "language": "en" - }, - { - "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass", - "contenttype": "text/plain", - "created": 1447639154000, - "id": "666073008692314113", - "language": "en" - }, - { - "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass", - "contenttype": "text/plain", - "created": 1447638226000, - "id": "666069114889179136", - "language": "en" - }, - { - "content": ".@TheRock moving back in with your parents so humbling. 
\" on the other side of your pain is something good if you can hold on\" #masterclass", - "contenttype": "text/plain", - "created": 1447638067000, - "id": "666068446325665792", - "language": "en" - }, - { - "content": "Wow aren't you loving @TheRock and his candor? #Masterclass", - "contenttype": "text/plain", - "created": 1447637459000, - "id": "666065895932973057", - "language": "en" - }, - { - "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.", - "contenttype": "text/plain", - "created": 1447637030000, - "id": "666064097562247168", - "language": "en" - }, - { - "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass", - "contenttype": "text/plain", - "created": 1447636205000, - "id": "666060637181644800", - "language": "en" - }, - { - "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1447602477000, - "id": "665919171062927360", - "language": "en" - }, - { - "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts", - "contenttype": "text/plain", - "created": 1447098990000, - "id": "663807393063538688", - "language": "en" - }, - { - "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.", - "contenttype": "text/plain", - "created": 1446998643000, - "id": "663386507856736257", - "language": "en" - }, - { - "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K", - "contenttype": "text/plain", - "created": 1446915955000, - "id": "663039689360695296", - "language": "en" - }, - { - "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f", - "contenttype": "text/plain", - "created": 1446881193000, - "id": "662893888080879616", - "language": "en" - }, - { - "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U", - "contenttype": "text/plain", - "created": 1446744186000, - "id": "662319239844380672", - "language": "en" - }, - { - "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass", - "contenttype": "text/plain", - "created": 1446428929000, - "id": "660996956861280256", - "language": "en" - }, - { - "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass", - "contenttype": "text/plain", - "created": 1446426630000, - "id": "660987310889041920", - "language": "en" - }, - { - "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow", - "contenttype": "text/plain", - "created": 1446220097000, - "id": "660121050978611205", - "language": "en" - }, - { - "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! 
https:\/\/t.co\/iFMnpRAsno", - "contenttype": "text/plain", - "created": 1446074433000, - "id": "659510090748182528", - "language": "en" - }, - { - "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE", - "contenttype": "text/plain", - "created": 1445821114000, - "id": "658447593865945089", - "language": "en" - }, - { - "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass", - "contenttype": "text/plain", - "created": 1445821003000, - "id": "658447130026188800", - "language": "en" - }, - { - "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.", - "contenttype": "text/plain", - "created": 1445820161000, - "id": "658443598313181188", - "language": "en" - }, - { - "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J", - "contenttype": "text/plain", - "created": 1445811545000, - "id": "658407457438363648", - "language": "en" - }, - { - "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026", - "contenttype": "text/plain", - "created": 1445804181000, - "id": "658376572521459712", - "language": "en" - }, - { - "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw", - "contenttype": "text/plain", - "created": 1445804072000, - "id": "658376116575449088", - "language": "en" - }, - { - "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1445734755000, - "id": "658085377140363264", - "language": "en" - }, - { - "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna", - "contenttype": "text/plain", - "created": 1445734097000, - "id": "658082618819280896", - "language": "en" - }, - { - "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026", - "contenttype": "text/plain", - "created": 1445732769000, - "id": "658077046858383360", - "language": "en" - }, - { - "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief", - "contenttype": "text/plain", - "created": 1445732579000, - "id": "658076253618991104", - "language": "en" - }, - { - "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief", - "contenttype": "text/plain", - "created": 1445731782000, - "id": "658072908237934592", - "language": "en" - }, - { - "content": "I just love Alex and his daring #Belief to live fully the present Moment.", - "contenttype": "text/plain", - "created": 1445731561000, - "id": "658071980982206464", - "language": "en" - }, - { - "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f", - "contenttype": "text/plain", - "created": 1445731248000, - "id": "658070668785770496", - "language": "en" - }, - { - "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? 
What do you Believe?", - "contenttype": "text/plain", - "created": 1445731081000, - "id": "658069968534171648", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!", - "contenttype": "text/plain", - "created": 1445648630000, - "id": "657724143115202560", - "language": "en" - }, - { - "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah", - "contenttype": "text/plain", - "created": 1445647285000, - "id": "657718501147197442", - "language": "en" - }, - { - "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief", - "contenttype": "text/plain", - "created": 1445646903000, - "id": "657716901951369218", - "language": "en" - }, - { - "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief", - "contenttype": "text/plain", - "created": 1445645633000, - "id": "657711572492533760", - "language": "en" - }, - { - "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc", - "contenttype": "text/plain", - "created": 1445618531000, - "id": "657597898394173440", - "language": "en" - }, - { - "content": "Thanks All for another great night of #BELIEF", - "contenttype": "text/plain", - "created": 1445572548000, - "id": "657405031822430208", - "language": "en" - }, - { - "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026", - "contenttype": "text/plain", - "created": 1445571500000, - "id": "657400636745510912", - "language": "en" - }, - { - "content": "Ok west coast let's do it! #belief", - "contenttype": "text/plain", - "created": 1445569367000, - "id": "657391689439404033", - "language": "en" - }, - { - "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.", - "contenttype": "text/plain", - "created": 1445569296000, - "id": "657391393883619328", - "language": "en" - }, - { - "content": "Hello west coast twitterati.. See you at 8 for #Belief", - "contenttype": "text/plain", - "created": 1445566144000, - "id": "657378171872874496", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night.#Belief", - "contenttype": "text/plain", - "created": 1445475948000, - "id": "656999861254918145", - "language": "en" - }, - { - "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief", - "contenttype": "text/plain", - "created": 1445475602000, - "id": "656998409933451264", - "language": "en" - }, - { - "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief", - "contenttype": "text/plain", - "created": 1445475580000, - "id": "656998320133398528", - "language": "en" - }, - { - "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief", - "contenttype": "text/plain", - "created": 1445473768000, - "id": "656990717504393216", - "language": "en" - }, - { - "content": "Notice her mother crying. 
She knows its last tine she will ever see her daughter.#Belief", - "contenttype": "text/plain", - "created": 1445473150000, - "id": "656988127433637888", - "language": "en" - }, - { - "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief", - "contenttype": "text/plain", - "created": 1445473063000, - "id": "656987763644891136", - "language": "en" - }, - { - "content": "\"What my faith gives me no one can match\"#Belief", - "contenttype": "text/plain", - "created": 1445472961000, - "id": "656987336266223616", - "language": "en" - }, - { - "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief", - "contenttype": "text/plain", - "created": 1445472531000, - "id": "656985529951522816", - "language": "en" - }, - { - "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief", - "contenttype": "text/plain", - "created": 1445472393000, - "id": "656984953037586433", - "language": "en" - }, - { - "content": "Good evening Team #Belief the Tweet is on!", - "contenttype": "text/plain", - "created": 1445472098000, - "id": "656983714883239937", - "language": "en" - }, - { - "content": "Thanks everyone for another Epic #Belief night!", - "contenttype": "text/plain", - "created": 1445302792000, - "id": "656273592485810176", - "language": "en" - }, - { - "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief", - "contenttype": "text/plain", - "created": 1445302749000, - "id": "656273415280705536", - "language": "en" - }, - { - "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief", - "contenttype": "text/plain", - "created": 1445301110000, - "id": "656266540967424000", - "language": "en" - }, - { - "content": "Do you all believe in \"soul mates\"?\n#Belief", - "contenttype": "text/plain", - "created": 1445300138000, - "id": "656262462426238976", - "language": "en" - }, - { - "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.", - "contenttype": "text/plain", - "created": 1445299749000, - "id": "656260832628756480", - "language": "en" - }, - { - "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief", - "contenttype": "text/plain", - "created": 1445299614000, - "id": "656260263604310016", - "language": "en" - }, - { - "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief", - "contenttype": "text/plain", - "created": 1445299326000, - "id": "656259057758654464", - "language": "en" - }, - { - "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026", - "contenttype": "text/plain", - "created": 1445295716000, - "id": "656243916224638976", - "language": "en" - }, - { - "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026", - "contenttype": "text/plain", - "created": 1445295702000, - "id": "656243854610337793", - "language": "en" - }, - { - "content": "RT @OWNTV: See the world through someone else\u2019s soul. 
The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g", - "contenttype": "text/plain", - "created": 1445295668000, - "id": "656243714507931648", - "language": "en" - }, - { - "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026", - "contenttype": "text/plain", - "created": 1445295661000, - "id": "656243684720050176", - "language": "en" - }, - { - "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9", - "contenttype": "text/plain", - "created": 1445279425000, - "id": "656175584943341568", - "language": "en" - }, - { - "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.", - "contenttype": "text/plain", - "created": 1445275802000, - "id": "656160388526899200", - "language": "en" - }, - { - "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026", - "contenttype": "text/plain", - "created": 1445229489000, - "id": "655966138279432192", - "language": "en" - }, - { - "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah", - "contenttype": "text/plain", - "created": 1445227342000, - "id": "655957135688241152", - "language": "en" - }, - { - "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026", - "contenttype": "text/plain", - "created": 1445225935000, - "id": "655951232981295104", - "language": "en" - }, - { - "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026", - "contenttype": "text/plain", - "created": 1445225228000, - "id": "655948267868426240", - "language": "en" - }, - { - "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026", - "contenttype": "text/plain", - "created": 1445225008000, - "id": "655947345197076480", - "language": "en" - }, - { - "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026", - "contenttype": "text/plain", - "created": 1445224852000, - "id": "655946689249828864", - "language": "en" - }, - { - "content": "West Coast... Here we go. #Belief", - "contenttype": "text/plain", - "created": 1445224140000, - "id": "655943701840048128", - "language": "en" - }, - { - "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs", - "contenttype": "text/plain", - "created": 1445220694000, - "id": "655929249669378048", - "language": "en" - }, - { - "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief", - "contenttype": "text/plain", - "created": 1445215032000, - "id": "655905500056391682", - "language": "en" - }, - { - "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. 
#Bel\u2026", - "contenttype": "text/plain", - "created": 1445214538000, - "id": "655903432079904768", - "language": "en" - }, - { - "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV", - "contenttype": "text/plain", - "created": 1445214534000, - "id": "655903413385891840", - "language": "en" - }, - { - "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh", - "contenttype": "text/plain", - "created": 1445214502000, - "id": "655903277796732931", - "language": "en" - }, - { - "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief", - "contenttype": "text/plain", - "created": 1445214498000, - "id": "655903264374812672", - "language": "en" - }, - { - "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief", - "contenttype": "text/plain", - "created": 1445214339000, - "id": "655902594171203584", - "language": "en" - }, - { - "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY", - "contenttype": "text/plain", - "created": 1445214327000, - "id": "655902545903140864", - "language": "en" - }, - { - "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026", - "contenttype": "text/plain", - "created": 1445214128000, - "id": "655901708506103812", - "language": "en" - }, - { - "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026", - "contenttype": "text/plain", - "created": 1445213967000, - "id": "655901033952993280", - "language": "en" - }, - { - "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief", - "contenttype": "text/plain", - "created": 1445213904000, - "id": "655900772467474435", - "language": "en" - }, - { - "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it", - "contenttype": "text/plain", - "created": 1445213901000, - "id": "655900756604620800", - "language": "en" - }, - { - "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026", - "contenttype": "text/plain", - "created": 1445213721000, - "id": "655900002644987905", - "language": "en" - }, - { - "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV", - "contenttype": "text/plain", - "created": 1445213706000, - "id": "655899942242775040", - "language": "en" - }, - { - "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026", - "contenttype": "text/plain", - "created": 1445213699000, - "id": "655899910563217408", - "language": "en" - }, - { - "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC", - "contenttype": "text/plain", - "created": 1445212831000, - "id": "655896269542420480", - "language": "en" - }, - { - "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.", - "contenttype": "text/plain", - "created": 1445212747000, - "id": "655895916675600384", - "language": "en" - }, - { - "content": "RT @GrowingOWN: 7 minutes, y'all! 
#BELIEF", - "contenttype": "text/plain", - "created": 1445212465000, - "id": "655894734968217600", - "language": "en" - }, - { - "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN", - "contenttype": "text/plain", - "created": 1445211845000, - "id": "655892134524878848", - "language": "en" - }, - { - "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026", - "contenttype": "text/plain", - "created": 1445211835000, - "id": "655892094905532416", - "language": "en" - }, - { - "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.", - "contenttype": "text/plain", - "created": 1445211833000, - "id": "655892084314890240", - "language": "en" - }, - { - "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026", - "contenttype": "text/plain", - "created": 1445209201000, - "id": "655881046102142978", - "language": "en" - }, - { - "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal", - "contenttype": "text/plain", - "created": 1445209181000, - "id": "655880959535939584", - "language": "en" - }, - { - "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026", - "contenttype": "text/plain", - "created": 1445208945000, - "id": "655879970732949504", - "language": "en" - }, - { - "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass", - "contenttype": "text/plain", - "created": 1447639154000, - "id": "666073008692314113", - "language": "en" - }, - { - "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass", - "contenttype": "text/plain", - "created": 1447638226000, - "id": "666069114889179136", - "language": "en" - }, - { - "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass", - "contenttype": "text/plain", - "created": 1447638067000, - "id": "666068446325665792", - "language": "en" - }, - { - "content": "Wow aren't you loving @TheRock and his candor? #Masterclass", - "contenttype": "text/plain", - "created": 1447637459000, - "id": "666065895932973057", - "language": "en" - }, - { - "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.", - "contenttype": "text/plain", - "created": 1447637030000, - "id": "666064097562247168", - "language": "en" - }, - { - "content": "\"Be You\".. 
That's the best advice ever @TheRock #MastersClass", - "contenttype": "text/plain", - "created": 1447636205000, - "id": "666060637181644800", - "language": "en" - }, - { - "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1447602477000, - "id": "665919171062927360", - "language": "en" - }, - { - "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts", - "contenttype": "text/plain", - "created": 1447098990000, - "id": "663807393063538688", - "language": "en" - }, - { - "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.", - "contenttype": "text/plain", - "created": 1446998643000, - "id": "663386507856736257", - "language": "en" - }, - { - "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K", - "contenttype": "text/plain", - "created": 1446915955000, - "id": "663039689360695296", - "language": "en" - }, - { - "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f", - "contenttype": "text/plain", - "created": 1446881193000, - "id": "662893888080879616", - "language": "en" - }, - { - "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U", - "contenttype": "text/plain", - "created": 1446744186000, - "id": "662319239844380672", - "language": "en" - }, - { - "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass", - "contenttype": "text/plain", - "created": 1446428929000, - "id": "660996956861280256", - "language": "en" - }, - { - "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass", - "contenttype": "text/plain", - "created": 1446426630000, - "id": "660987310889041920", - "language": "en" - }, - { - "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow", - "contenttype": "text/plain", - "created": 1446220097000, - "id": "660121050978611205", - "language": "en" - }, - { - "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno", - "contenttype": "text/plain", - "created": 1446074433000, - "id": "659510090748182528", - "language": "en" - }, - { - "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE", - "contenttype": "text/plain", - "created": 1445821114000, - "id": "658447593865945089", - "language": "en" - }, - { - "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass", - "contenttype": "text/plain", - "created": 1445821003000, - "id": "658447130026188800", - "language": "en" - }, - { - "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.", - "contenttype": "text/plain", - "created": 1445820161000, - "id": "658443598313181188", - "language": "en" - }, - { - "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. 
#SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J", - "contenttype": "text/plain", - "created": 1445811545000, - "id": "658407457438363648", - "language": "en" - }, - { - "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026", - "contenttype": "text/plain", - "created": 1445804181000, - "id": "658376572521459712", - "language": "en" - }, - { - "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw", - "contenttype": "text/plain", - "created": 1445804072000, - "id": "658376116575449088", - "language": "en" - }, - { - "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe", - "contenttype": "text/plain", - "created": 1445734755000, - "id": "658085377140363264", - "language": "en" - }, - { - "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna", - "contenttype": "text/plain", - "created": 1445734097000, - "id": "658082618819280896", - "language": "en" - }, - { - "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026", - "contenttype": "text/plain", - "created": 1445732769000, - "id": "658077046858383360", - "language": "en" - }, - { - "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief", - "contenttype": "text/plain", - "created": 1445732579000, - "id": "658076253618991104", - "language": "en" - }, - { - "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief", - "contenttype": "text/plain", - "created": 1445731782000, - "id": "658072908237934592", - "language": "en" - }, - { - "content": "I just love Alex and his daring #Belief to live fully the present Moment.", - "contenttype": "text/plain", - "created": 1445731561000, - "id": "658071980982206464", - "language": "en" - }, - { - "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f", - "contenttype": "text/plain", - "created": 1445731248000, - "id": "658070668785770496", - "language": "en" - }, - { - "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?", - "contenttype": "text/plain", - "created": 1445731081000, - "id": "658069968534171648", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!", - "contenttype": "text/plain", - "created": 1445648630000, - "id": "657724143115202560", - "language": "en" - }, - { - "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah", - "contenttype": "text/plain", - "created": 1445647285000, - "id": "657718501147197442", - "language": "en" - }, - { - "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief", - "contenttype": "text/plain", - "created": 1445646903000, - "id": "657716901951369218", - "language": "en" - }, - { - "content": "Hey Belief team mates can yo believe how quickly the week has passed? 
#Belief", - "contenttype": "text/plain", - "created": 1445645633000, - "id": "657711572492533760", - "language": "en" - }, - { - "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc", - "contenttype": "text/plain", - "created": 1445618531000, - "id": "657597898394173440", - "language": "en" - }, - { - "content": "Thanks All for another great night of #BELIEF", - "contenttype": "text/plain", - "created": 1445572548000, - "id": "657405031822430208", - "language": "en" - }, - { - "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026", - "contenttype": "text/plain", - "created": 1445571500000, - "id": "657400636745510912", - "language": "en" - }, - { - "content": "Ok west coast let's do it! #belief", - "contenttype": "text/plain", - "created": 1445569367000, - "id": "657391689439404033", - "language": "en" - }, - { - "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.", - "contenttype": "text/plain", - "created": 1445569296000, - "id": "657391393883619328", - "language": "en" - }, - { - "content": "Hello west coast twitterati.. See you at 8 for #Belief", - "contenttype": "text/plain", - "created": 1445566144000, - "id": "657378171872874496", - "language": "en" - }, - { - "content": "Thank you all for another beautiful night.#Belief", - "contenttype": "text/plain", - "created": 1445475948000, - "id": "656999861254918145", - "language": "en" - }, - { - "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief", - "contenttype": "text/plain", - "created": 1445475602000, - "id": "656998409933451264", - "language": "en" - }, - { - "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief", - "contenttype": "text/plain", - "created": 1445475580000, - "id": "656998320133398528", - "language": "en" - }, - { - "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief", - "contenttype": "text/plain", - "created": 1445473768000, - "id": "656990717504393216", - "language": "en" - }, - { - "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief", - "contenttype": "text/plain", - "created": 1445473150000, - "id": "656988127433637888", - "language": "en" - }, - { - "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief", - "contenttype": "text/plain", - "created": 1445473063000, - "id": "656987763644891136", - "language": "en" - }, - { - "content": "\"What my faith gives me no one can match\"#Belief", - "contenttype": "text/plain", - "created": 1445472961000, - "id": "656987336266223616", - "language": "en" - }, - { - "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief", - "contenttype": "text/plain", - "created": 1445472531000, - "id": "656985529951522816", - "language": "en" - }, - { - "content": "I'd never heard of Jain monks and nuns before doing this series. 
#Belief", - "contenttype": "text/plain", - "created": 1445472393000, - "id": "656984953037586433", - "language": "en" - }, - { - "content": "Good evening Team #Belief the Tweet is on!", - "contenttype": "text/plain", - "created": 1445472098000, - "id": "656983714883239937", - "language": "en" - }, - { - "content": "Thanks everyone for another Epic #Belief night!", - "contenttype": "text/plain", - "created": 1445302792000, - "id": "656273592485810176", - "language": "en" - } - ] -} diff --git a/resources/personality-v3.txt b/resources/personality-v3.txt deleted file mode 100644 index b11508a4e..000000000 --- a/resources/personality-v3.txt +++ /dev/null @@ -1,137 +0,0 @@ -Vice President Johnson, Mr. Speaker, Mr. Chief Justice, President Eisenhower, -Vice President Nixon, President Truman, Reverend Clergy, fellow citizens: - -We observe today not a victory of party but a celebration of freedom -- -symbolizing an end as well as a beginning -- signifying renewal as well as -change. For I have sworn before you and Almighty God the same solemn oath our -forbears prescribed nearly a century and three-quarters ago. - -The world is very different now. For man holds in his mortal hands the power -to abolish all forms of human poverty and all forms of human life. And yet -the same revolutionary beliefs for which our forebears fought are still at -issue around the globe -- the belief that the rights of man come not from the -generosity of the state but from the hand of God. - -We dare not forget today that we are the heirs of that first revolution. Let -the word go forth from this time and place, to friend and foe alike, that the -torch has been passed to a new generation of Americans -- born in this century, -tempered by war, disciplined by a hard and bitter peace, proud of our ancient -heritage -- and unwilling to witness or permit the slow undoing of those human -rights to which this nation has always been committed, and to which we are -committed today at home and around the world. - -Let every nation know, whether it wishes us well or ill, that we shall pay -any price, bear any burden, meet any hardship, support any friend, oppose -any foe to assure the survival and the success of liberty. - -This much we pledge -- and more. - -To those old allies whose cultural and spiritual origins we share, we pledge -the loyalty of faithful friends. United there is little we cannot do in a host -of cooperative ventures. Divided there is little we can do -- for we dare not -meet a powerful challenge at odds and split asunder. - -To those new states whom we welcome to the ranks of the free, we pledge our -word that one form of colonial control shall not have passed away merely to -be replaced by a far more iron tyranny. We shall not always expect to find -them supporting our view. But we shall always hope to find them strongly -supporting their own freedom -- and to remember that, in the past, those who -foolishly sought power by riding the back of the tiger ended up inside. - -To those people in the huts and villages of half the globe struggling to -break the bonds of mass misery, we pledge our best efforts to help them help -themselves, for whatever period is required -- not because the communists may -be doing it, not because we seek their votes, but because it is right. If a -free society cannot help the many who are poor, it cannot save the few who -are rich. 
- -To our sister republics south of our border, we offer a special pledge -- to -convert our good words into good deeds -- in a new alliance for progress -- -to assist free men and free governments in casting off the chains of poverty. -But this peaceful revolution of hope cannot become the prey of hostile powers. -Let all our neighbors know that we shall join with them to oppose aggression -or subversion anywhere in the Americas. And let every other power know that -this Hemisphere intends to remain the master of its own house. - -To that world assembly of sovereign states, the United Nations, our last best -hope in an age where the instruments of war have far outpaced the instruments -of peace, we renew our pledge of support -- to prevent it from becoming merely -a forum for invective -- to strengthen its shield of the new and the weak -- -and to enlarge the area in which its writ may run. - -Finally, to those nations who would make themselves our adversary, we offer -not a pledge but a request: that both sides begin anew the quest for peace, -before the dark powers of destruction unleashed by science engulf all humanity -in planned or accidental self-destruction. - -We dare not tempt them with weakness. For only when our arms are sufficient -beyond doubt can we be certain beyond doubt that they will never be employed. - -But neither can two great and powerful groups of nations take comfort from -our present course -- both sides overburdened by the cost of modern weapons, -both rightly alarmed by the steady spread of the deadly atom, yet both racing -to alter that uncertain balance of terror that stays the hand of mankind's -final war. - -So let us begin anew -- remembering on both sides that civility is not a sign -of weakness, and sincerity is always subject to proof. Let us never negotiate -out of fear. But let us never fear to negotiate. - -Let both sides explore what problems unite us instead of belaboring those -problems which divide us. - -Let both sides, for the first time, formulate serious and precise proposals -for the inspection and control of arms -- and bring the absolute power to -destroy other nations under the absolute control of all nations. - -Let both sides seek to invoke the wonders of science instead of its terrors. -Together let us explore the stars, conquer the deserts, eradicate disease, -tap the ocean depths and encourage the arts and commerce. - -Let both sides unite to heed in all corners of the earth the command of -Isaiah -- to "undo the heavy burdens ... (and) let the oppressed go free." - -And if a beachhead of cooperation may push back the jungle of suspicion, let -both sides join in creating a new endeavor, not a new balance of power, but -a new world of law, where the strong are just and the weak secure and the -peace preserved. - -All this will not be finished in the first one hundred days. Nor will it be -finished in the first one thousand days, nor in the life of this -Administration, nor even perhaps in our lifetime on this planet. But let us -begin. - -In your hands, my fellow citizens, more than mine, will rest the final success -or failure of our course. Since this country was founded, each generation of -Americans has been summoned to give testimony to its national loyalty. The -graves of young Americans who answered the call to service surround the globe. 
- -Now the trumpet summons us again -- not as a call to bear arms, though arms we -need -- not as a call to battle, though embattled we are -- but a call to bear -the burden of a long twilight struggle, year in and year out, "rejoicing in -hope, patient in tribulation" -- a struggle against the common enemies of man: -tyranny, poverty, disease and war itself. - -Can we forge against these enemies a grand and global alliance, North and -South, East and West, that can assure a more fruitful life for all mankind? -Will you join in that historic effort? - -In the long history of the world, only a few generations have been granted -the role of defending freedom in its hour of maximum danger. I do not shrink -from this responsibility -- I welcome it. I do not believe that any of us -would exchange places with any other people or any other generation. The -energy, the faith, the devotion which we bring to this endeavor will light -our country and all who serve it -- and the glow from that fire can truly -light the world. - -And so, my fellow Americans: ask not what your country can do for you -- ask -what you can do for your country. - -My fellow citizens of the world: ask not what America will do for you, but -what together we can do for the freedom of man. - -Finally, whether you are citizens of America or citizens of the world, ask of -us here the same high standards of strength and sacrifice which we ask of you. -With a good conscience our only sure reward, with history the final judge of -our deeds, let us go forth to lead the land we love, asking His blessing and -His help, but knowing that here on earth God's work must truly be our own. diff --git a/resources/personality.es.txt b/resources/personality.es.txt deleted file mode 100644 index 950fdb28e..000000000 --- a/resources/personality.es.txt +++ /dev/null @@ -1,13 +0,0 @@ -En un lugar de la Mancha, de cuyo nombre no quiero acordarme, no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero, adarga antigua, rocín flaco y galgo corredor. Una olla de algo más vaca que carnero, salpicón las más noches, duelos y quebrantos los sábados, lantejas los viernes, algún palomino de añadidura los domingos, consumían las tres partes de su hacienda. El resto della concluían sayo de velarte, calzas de velludo para las fiestas, con sus pantuflos de lo mesmo, y los días de entresemana se honraba con su vellorí de lo más fino. Tenía en su casa una ama que pasaba de los cuarenta, y una sobrina que no llegaba a los veinte, y un mozo de campo y plaza, que así ensillaba el rocín como tomaba la podadera. Frisaba la edad de nuestro hidalgo con los cincuenta años; era de complexión recia, seco de carnes, enjuto de rostro, gran madrugador y amigo de la caza. Quieren decir que tenía el sobrenombre de Quijada, o Quesada, que en esto hay alguna diferencia en los autores que deste caso escriben; aunque, por conjeturas verosímiles, se deja entender que se llamaba Quejana. Pero esto importa poco a nuestro cuento; basta que en la narración dél no se salga un punto de la verdad. -Es, pues, de saber que este sobredicho hidalgo, los ratos que estaba ocioso, que eran los más del año, se daba a leer libros de caballerías, con tanta afición y gusto, que olvidó casi de todo punto el ejercicio de la caza, y aun la administración de su hacienda. 
Y llegó a tanto su curiosidad y desatino en esto, que vendió muchas hanegas de tierra de sembradura para comprar libros de caballerías en que leer, y así, llevó a su casa todos cuantos pudo haber dellos; y de todos, ningunos le parecían tan bien como los que compuso el famoso Feliciano de Silva, porque la claridad de su prosa y aquellas entricadas razones suyas le parecían de perlas, y más cuando llegaba a leer aquellos requiebros y cartas de desafíos, donde en muchas partes hallaba escrito: La razón de la sinrazón que a mi razón se hace, de tal manera mi razón enflaquece, que con razón me quejo de la vuestra fermosura. Y también cuando leía: ...los altos cielos que de vuestra divinidad divinamente con las estrellas os fortifican, y os hacen merecedora del merecimiento que merece la vuestra grandeza. -Con estas razones perdía el pobre caballero el juicio, y desvelábase por entenderlas y desentrañarles el sentido, que no se lo sacara ni las entendiera el mesmo Aristóteles, si resucitara para sólo ello. No estaba muy bien con las heridas que don Belianís daba y recebía, porque se imaginaba que, por grandes maestros que le hubiesen curado, no dejaría de tener el rostro y todo el cuerpo lleno de cicatrices y señales. Pero, con todo, alababa en su autor aquel acabar su libro con la promesa de aquella inacabable aventura, y muchas veces le vino deseo de tomar la pluma y dalle fin al pie de la letra, como allí se promete; y sin duda alguna lo hiciera, y aun saliera con ello, si otros mayores y continuos pensamientos no se lo estorbaran. Tuvo muchas veces competencia con el cura de su lugar —que era hombre docto, graduado en Sigüenza—, sobre cuál había sido mejor caballero: Palmerín de Ingalaterra o Amadís de Gaula; mas maese Nicolás, barbero del mesmo pueblo, decía que ninguno llegaba al Caballero del Febo, y que si alguno se le podía comparar, era don Galaor, hermano de Amadís de Gaula, porque tenía muy acomodada condición para todo; que no era caballero melindroso, ni tan llorón como su hermano, y que en lo de la valentía no le iba en zaga. -En resolución, él se enfrascó tanto en su letura, que se le pasaban las noches leyendo de claro en claro, y los días de turbio en turbio; y así, del poco dormir y del mucho leer, se le secó el celebro, de manera que vino a perder el juicio. Llenósele la fantasía de todo aquello que leía en los libros, así de encantamentos como de pendencias, batallas, desafíos, heridas, requiebros, amores, tormentas y disparates imposibles; y asentósele de tal modo en la imaginación que era verdad toda aquella máquina de aquellas sonadas soñadas invenciones que leía, que para él no había otra historia más cierta en el mundo. Decía él que el Cid Ruy Díaz había sido muy buen caballero, pero que no tenía que ver con el Caballero de la Ardiente Espada, que de sólo un revés había partido por medio dos fieros y descomunales gigantes. Mejor estaba con Bernardo del Carpio, porque en Roncesvalles había muerto a Roldán el encantado, valiéndose de la industria de Hércules, cuando ahogó a Anteo, el hijo de la Tierra, entre los brazos. Decía mucho bien del gigante Morgante, porque, con ser de aquella generación gigantea, que todos son soberbios y descomedidos, él solo era afable y bien criado. Pero, sobre todos, estaba bien con Reinaldos de Montalbán, y más cuando le veía salir de su castillo y robar cuantos topaba, y cuando en allende robó aquel ídolo de Mahoma que era todo de oro, según dice su historia. 
Diera él, por dar una mano de coces al traidor de Galalón, al ama que tenía, y aun a su sobrina de añadidura. -En efeto, rematado ya su juicio, vino a dar en el más estraño pensamiento que jamás dio loco en el mundo; y fue que le pareció convenible y necesario, así para el aumento de su honra como para el servicio de su república, hacerse caballero andante, y irse por todo el mundo con sus armas y caballo a buscar las aventuras y a ejercitarse en todo aquello que él había leído que los caballeros andantes se ejercitaban, deshaciendo todo género de agravio, y poniéndose en ocasiones y peligros donde, acabándolos, cobrase eterno nombre y fama. Imaginábase el pobre ya coronado por el valor de su brazo, por lo menos, del imperio de Trapisonda; y así, con estos tan agradables pensamientos, llevado del estraño gusto que en ellos sentía, se dio priesa a poner en efeto lo que deseaba. -Y lo primero que hizo fue limpiar unas armas que habían sido de sus bisabuelos, que, tomadas de orín y llenas de moho, luengos siglos había que estaban puestas y olvidadas en un rincón. Limpiólas y aderezólas lo mejor que pudo, pero vio que tenían una gran falta, y era que no tenían celada de encaje, sino morrión simple; mas a esto suplió su industria, porque de cartones hizo un modo de media celada, que, encajada con el morrión, hacían una apariencia de celada entera. Es verdad que para probar si era fuerte y podía estar al riesgo de una cuchillada, sacó su espada y le dio dos golpes, y con el primero y en un punto deshizo lo que había hecho en una semana; y no dejó de parecerle mal la facilidad con que la había hecho pedazos, y, por asegurarse deste peligro, la tornó a hacer de nuevo, poniéndole unas barras de hierro por de dentro, de tal manera que él quedó satisfecho de su fortaleza; y, sin querer hacer nueva experiencia della, la diputó y tuvo por celada finísima de encaje. -Fue luego a ver su rocín, y, aunque tenía más cuartos que un real y más tachas que el caballo de Gonela, que tantum pellis et ossa fuit, le pareció que ni el Bucéfalo de Alejandro ni Babieca el del Cid con él se igualaban. Cuatro días se le pasaron en imaginar qué nombre le pondría; porque, según se decía él a sí mesmo, no era razón que caballo de caballero tan famoso, y tan bueno él por sí, estuviese sin nombre conocido; y ansí, procuraba acomodársele de manera que declarase quién había sido, antes que fuese de caballero andante, y lo que era entonces; pues estaba muy puesto en razón que, mudando su señor estado, mudase él también el nombre, y le cobrase famoso y de estruendo, como convenía a la nueva orden y al nuevo ejercicio que ya profesaba. Y así, después de muchos nombres que formó, borró y quitó, añadió, deshizo y tornó a hacer en su memoria e imaginación, al fin le vino a llamar Rocinante: nombre, a su parecer, alto, sonoro y significativo de lo que había sido cuando fue rocín, antes de lo que ahora era, que era antes y primero de todos los rocines del mundo. -Puesto nombre, y tan a su gusto, a su caballo, quiso ponérsele a sí mismo, y en este pensamiento duró otros ocho días, y al cabo se vino a llamar don Quijote; de donde —como queda dicho— tomaron ocasión los autores desta tan verdadera historia que, sin duda, se debía de llamar Quijada, y no Quesada, como otros quisieron decir. 
Pero, acordándose que el valeroso Amadís no sólo se había contentado con llamarse Amadís a secas, sino que añadió el nombre de su reino y patria, por Hepila famosa, y se llamó Amadís de Gaula, así quiso, como buen caballero, añadir al suyo el nombre de la suya y llamarse don Quijote de la Mancha, con que, a su parecer, declaraba muy al vivo su linaje y patria, y la honraba con tomar el sobrenombre della. -Limpias, pues, sus armas, hecho del morrión celada, puesto nombre a su rocín y confirmándose a sí mismo, se dio a entender que no le faltaba otra cosa sino buscar una dama de quien enamorarse; porque el caballero andante sin amores era árbol sin hojas y sin fruto y cuerpo sin alma. Decíase él a sí: -— Si yo, por malos de mis pecados, o por mi buena suerte, me encuentro por ahí con algún gigante, como de ordinario les acontece a los caballeros andantes, y le derribo de un encuentro, o le parto por mitad del cuerpo, o, finalmente, le venzo y le rindo, ¿no será bien tener a quien enviarle presentado y que entre y se hinque de rodillas ante mi dulce señora, y diga con voz humilde y rendido: ''Yo, señora, soy el gigante Caraculiambro, señor de la ínsula Malindrania, a quien venció en singular batalla el jamás como se debe alabado caballero don Quijote de la Mancha, el cual me mandó que me presentase ante vuestra merced, para que la vuestra grandeza disponga de mí a su talante''? -¡Oh, cómo se holgó nuestro buen caballero cuando hubo hecho este discurso, y más cuando halló a quien dar nombre de su dama! Y fue, a lo que se cree, que en un lugar cerca del suyo había una moza labradora de muy buen parecer, de quien él un tiempo anduvo enamorado, aunque, según se entiende, ella jamás lo supo, ni le dio cata dello. Llamábase Aldonza Lorenzo, y a ésta le pareció ser bien darle título de señora de sus pensamientos; y, buscándole nombre que no desdijese mucho del suyo, y que tirase y se encaminase al de princesa y gran señora, vino a llamarla Dulcinea del Toboso, porque era natural del Toboso; nombre, a su parecer, músico y peregrino y significativo, como todos los demás que a él y a sus cosas había puesto. - - diff --git a/resources/personality.txt b/resources/personality.txt deleted file mode 100644 index 9bdd68266..000000000 --- a/resources/personality.txt +++ /dev/null @@ -1,15 +0,0 @@ -Call me Ishmael. Some years ago-never mind how long precisely-having little or no money in my purse, and nothing particular to interest me on shore, I thought I would sail about a little and see the watery part of the world. It is a way I have of driving off the spleen and regulating the circulation. Whenever I find myself growing grim about the mouth; whenever it is a damp, drizzly November in my soul; whenever I find myself involuntarily pausing before coffin warehouses, and bringing up the rear of every funeral I meet; and especially whenever my hypos get such an upper hand of me, that it requires a strong moral principle to prevent me from deliberately stepping into the street, and methodically knocking people's hats off-then, I account it high time to get to sea as soon as I can. This is my substitute for pistol and ball. With a philosophical flourish Cato throws himself upon his sword; I quietly take to the ship. There is nothing surprising in this. If they but knew it, almost all men in their degree, some time or other, cherish very nearly the same feelings towards the ocean with me. 
-There now is your insular city of the Manhattoes, belted round by wharves as Indian isles by coral reefs-commerce surrounds it with her surf. Right and left, the streets take you waterward. Its extreme downtown is the battery, where that noble mole is washed by waves, and cooled by breezes, which a few hours previous were out of sight of land. Look at the crowds of water-gazers there. -Circumambulate the city of a dreamy Sabbath afternoon. Go from Corlears Hook to Coenties Slip, and from thence, by Whitehall, northward. What do you see?-Posted like silent sentinels all around the town, stand thousands upon thousands of mortal men fixed in ocean reveries. Some leaning against the spiles; some seated upon the pier-heads; some looking over the bulwarks of ships from China; some high aloft in the rigging, as if striving to get a still better seaward peep. But these are all landsmen; of week days pent up in lath and plaster-tied to counters, nailed to benches, clinched to desks. How then is this? Are the green fields gone? What do they here? -But look! here come more crowds, pacing straight for the water, and seemingly bound for a dive. Strange! Nothing will content them but the extremest limit of the land; loitering under the shady lee of yonder warehouses will not suffice. No. They must get just as nigh the water as they possibly can without falling in. And there they stand-miles of them-leagues. Inlanders all, they come from lanes and alleys, streets and avenues-north, east, south, and west. Yet here they all unite. Tell me, does the magnetic virtue of the needles of the compasses of all those ships attract them thither? -Once more. Say you are in the country; in some high land of lakes. Take almost any path you please, and ten to one it carries you down in a dale, and leaves you there by a pool in the stream. There is magic in it. Let the most absent-minded of men be plunged in his deepest reveries-stand that man on his legs, set his feet a-going, and he will infallibly lead you to water, if water there be in all that region. Should you ever be athirst in the great American desert, try this experiment, if your caravan happen to be supplied with a metaphysical professor. Yes, as every one knows, meditation and water are wedded for ever. -But here is an artist. He desires to paint you the dreamiest, shadiest, quietest, most enchanting bit of romantic landscape in all the valley of the Saco. What is the chief element he employs? There stand his trees, each with a hollow trunk, as if a hermit and a crucifix were within; and here sleeps his meadow, and there sleep his cattle; and up from yonder cottage goes a sleepy smoke. Deep into distant woodlands winds a mazy way, reaching to overlapping spurs of mountains bathed in their hill-side blue. But though the picture lies thus tranced, and though this pine-tree shakes down its sighs like leaves upon this shepherd's head, yet all were vain, unless the shepherd's eye were fixed upon the magic stream before him. Go visit the Prairies in June, when for scores on scores of miles you wade knee-deep among Tiger-lilies-what is the one charm wanting?-Water-there is not a drop of water there! Were Niagara but a cataract of sand, would you travel your thousand miles to see it? Why did the poor poet of Tennessee, upon suddenly receiving two handfuls of silver, deliberate whether to buy him a coat, which he sadly needed, or invest his money in a pedestrian trip to Rockaway Beach? 
Why is almost every robust healthy boy with a robust healthy soul in him, at some time or other crazy to go to sea? Why upon your first voyage as a passenger, did you yourself feel such a mystical vibration, when first told that you and your ship were now out of sight of land? Why did the old Persians hold the sea holy? Why did the Greeks give it a separate deity, and own brother of Jove? Surely all this is not without meaning. And still deeper the meaning of that story of Narcissus, who because he could not grasp the tormenting, mild image he saw in the fountain, plunged into it and was drowned. But that same image, we ourselves see in all rivers and oceans. It is the image of the ungraspable phantom of life; and this is the key to it all. -Now, when I say that I am in the habit of going to sea whenever I begin to grow hazy about the eyes, and begin to be over conscious of my lungs, I do not mean to have it inferred that I ever go to sea as a passenger. For to go as a passenger you must needs have a purse, and a purse is but a rag unless you have something in it. Besides, passengers get sea-sick-grow quarrelsome-don't sleep of nights-do not enjoy themselves much, as a general thing;-no, I never go as a passenger; nor, though I am something of a salt, do I ever go to sea as a Commodore, or a Captain, or a Cook. I abandon the glory and distinction of such offices to those who like them. For my part, I abominate all honourable respectable toils, trials, and tribulations of every kind whatsoever. It is quite as much as I can do to take care of myself, without taking care of ships, barques, brigs, schooners, and what not. And as for going as cook,-though I confess there is considerable glory in that, a cook being a sort of officer on ship-board-yet, somehow, I never fancied broiling fowls;-though once broiled, judiciously buttered, and judgmatically salted and peppered, there is no one who will speak more respectfully, not to say reverentially, of a broiled fowl than I will. It is out of the idolatrous dotings of the old Egyptians upon broiled ibis and roasted river horse, that you see the mummies of those creatures in their huge bake-houses the pyramids. -No, when I go to sea, I go as a simple sailor, right before the mast, plumb down into the forecastle, aloft there to the royal mast-head. True, they rather order me about some, and make me jump from spar to spar, like a grasshopper in a May meadow. And at first, this sort of thing is unpleasant enough. It touches one's sense of honour, particularly if you come of an old established family in the land, the Van Rensselaers, or Randolphs, or Hardicanutes. And more than all, if just previous to putting your hand into the tar-pot, you have been lording it as a country schoolmaster, making the tallest boys stand in awe of you. The transition is a keen one, I assure you, from a schoolmaster to a sailor, and requires a strong decoction of Seneca and the Stoics to enable you to grin and bear it. But even this wears off in time. -What of it, if some old hunks of a sea-captain orders me to get a broom and sweep down the decks? What does that indignity amount to, weighed, I mean, in the scales of the New Testament? Do you think the archangel Gabriel thinks anything the less of me, because I promptly and respectfully obey that old hunks in that particular instance? Who ain't a slave? Tell me that. 
Well, then, however the old sea-captains may order me about-however they may thump and punch me about, I have the satisfaction of knowing that it is all right; that everybody else is one way or other served in much the same way-either in a physical or metaphysical point of view, that is; and so the universal thump is passed round, and all hands should rub each other's shoulder-blades, and be content. -Again, I always go to sea as a sailor, because they make a point of paying me for my trouble, whereas they never pay passengers a single penny that I ever heard of. On the contrary, passengers themselves must pay. And there is all the difference in the world between paying and being paid. The act of paying is perhaps the most uncomfortable infliction that the two orchard thieves entailed upon us. But BEING PAID,-what will compare with it? The urbane activity with which a man receives money is really marvellous, considering that we so earnestly believe money to be the root of all earthly ills, and that on no account can a monied man enter heaven. Ah! how cheerfully we consign ourselves to perdition! -Finally, I always go to sea as a sailor, because of the wholesome exercise and pure air of the fore-castle deck. For as in this world, head winds are far more prevalent than winds from astern (that is, if you never violate the Pythagorean maxim), so for the most part the Commodore on the quarter-deck gets his atmosphere at second hand from the sailors on the forecastle. He thinks he breathes it first; but not so. In much the same way do the commonalty lead their leaders in many other things, at the same time that the leaders little suspect it. But wherefore it was that after having repeatedly smelt the sea as a merchant sailor, I should now take it into my head to go on a whaling voyage; this the invisible police officer of the Fates, who has the constant surveillance of me, and secretly dogs me, and influences me in some unaccountable way-he can better answer than any one else. And, doubtless, my going on this whaling voyage, formed part of the grand programme of Providence that was drawn up a long time ago. It came in as a sort of brief interlude and solo between more extensive performances. I take it that this part of the bill must have run something like this: -"GRAND CONTESTED ELECTION FOR THE PRESIDENCY OF THE UNITED STATES. "WHALING VOYAGE BY ONE ISHMAEL. "BLOODY BATTLE IN AFFGHANISTAN." -Though I cannot tell why it was exactly that those stage managers, the Fates, put me down for this shabby part of a whaling voyage, when others were set down for magnificent parts in high tragedies, and short and easy parts in genteel comedies, and jolly parts in farces-though I cannot tell why this was exactly; yet, now that I recall all the circumstances, I think I can see a little into the springs and motives which being cunningly presented to me under various disguises, induced me to set about performing the part I did, besides cajoling me into the delusion that it was a choice resulting from my own unbiased freewill and discriminating judgment. -Chief among these motives was the overwhelming idea of the great whale himself. Such a portentous and mysterious monster roused all my curiosity. Then the wild and distant seas where he rolled his island bulk; the undeliverable, nameless perils of the whale; these, with all the attending marvels of a thousand Patagonian sights and sounds, helped to sway me to my wish. 
With other men, perhaps, such things would not have been inducements; but as for me, I am tormented with an everlasting itch for things remote. I love to sail forbidden seas, and land on barbarous coasts. Not ignoring what is good, I am quick to perceive a horror, and could still be social with it-would they let me-since it is but well to be on friendly terms with all the inmates of the place one lodges in. -By reason of these things, then, the whaling voyage was welcome; the great flood-gates of the wonder-world swung open, and in the wild conceits that swayed me to my purpose, two and two there floated into my inmost soul, endless processions of the whale, and, mid most of them all, one grand hooded phantom, like a snow hill in the air. diff --git a/resources/speech_with_pause.wav b/resources/speech_with_pause.wav new file mode 100644 index 000000000..783426cb5 Binary files /dev/null and b/resources/speech_with_pause.wav differ diff --git a/resources/table_test.png b/resources/table_test.png new file mode 100644 index 000000000..e709df6a1 Binary files /dev/null and b/resources/table_test.png differ diff --git a/resources/tone-example-html.json b/resources/tone-example-html.json deleted file mode 100755 index e663b6cfb..000000000 --- a/resources/tone-example-html.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "

Team, I know that times are tough!
Product sales have been disappointing for the past three quarters.
We have a competitive product, but we need to do a better job of selling it!
" -} diff --git a/resources/tone-example.json b/resources/tone-example.json deleted file mode 100755 index c3cc7f90c..000000000 --- a/resources/tone-example.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "Team, I know that times are tough! Product sales have been disappointing for the past three quarters. We have a competitive product, but we need to do a better job of selling it!" -} diff --git a/resources/tone-v3-expect1.json b/resources/tone-v3-expect1.json deleted file mode 100644 index e41cf8529..000000000 --- a/resources/tone-v3-expect1.json +++ /dev/null @@ -1,8680 +0,0 @@ -{ - "document_tone": { - "tone_categories": [ - { - "tones": [ - { - "score": 0.971214, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.546126, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.543228, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.072227, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.057439, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.53, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.003, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.55, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.241, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.513, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.467, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.749, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - "sentences_tone": [ - { - "sentence_id": 0, - "text": "Call me Ishmael.", - "input_from": 0, - "input_to": 16, - "tone_categories": [] - }, - { - "sentence_id": 1, - "text": "Some years ago-never mind how long precisely-having little or no money in my purse, and nothing particular to interest me on shore, I thought I would sail about a little and see the watery part of the world.", - "input_from": 17, - "input_to": 224, - "tone_categories": [ - { - "tones": [ - { - "score": 0.170393, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.350151, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.201739, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.114688, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.469036, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.114, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.728, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.406, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.166, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.284, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - 
"score": 0.375, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.92, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 2, - "text": "It is a way I have of driving off the spleen and regulating the circulation.", - "input_from": 225, - "input_to": 301, - "tone_categories": [ - { - "tones": [ - { - "score": 0.335625, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.263686, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.429728, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.20467, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.139387, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.628, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.755, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.253, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.461, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.312, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 3, - "text": "Whenever I find myself growing grim about the mouth; whenever it is a damp, drizzly November in my soul; whenever I find myself involuntarily pausing before coffin warehouses, and bringing up the rear of every funeral I meet; and especially whenever my hypos get such an upper hand of me, that it requires a strong moral principle to prevent me from deliberately stepping into the street, and methodically knocking people's hats off-then, I account it high time to get to sea as soon as I can.", - "input_from": 302, - "input_to": 795, - "tone_categories": [ - { - "tones": [ - { - "score": 0.53187, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.50254, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.36085, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.037935, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.158363, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.203, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.008, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.318, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.7, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.444, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.51, - "tone_id": "agreeableness_big5", - "tone_name": 
"Agreeableness" - }, - { - "score": 0.81, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 4, - "text": "This is my substitute for pistol and ball.", - "input_from": 796, - "input_to": 838, - "tone_categories": [ - { - "tones": [ - { - "score": 0.175965, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.290521, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.215051, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.302646, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.259432, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.569, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.571, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.446, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.56, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.81, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 5, - "text": "With a philosophical flourish Cato throws himself upon his sword; I quietly take to the ship.", - "input_from": 839, - "input_to": 932, - "tone_categories": [ - { - "tones": [ - { - "score": 0.183406, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.518299, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.150604, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.168203, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.307349, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.346, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.888, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.706, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.795, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.107, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 6, - "text": "There is nothing surprising in this.", - "input_from": 933, - "input_to": 969, - "tone_categories": [ - { - "tones": [ - { - "score": 0.202684, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.331177, - "tone_id": "disgust", - "tone_name": "Disgust" 
- }, - { - "score": 0.335063, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.249111, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.433038, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.35, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.044, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.795, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.935, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.739, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 7, - "text": "If they but knew it, almost all men in their degree, some time or other, cherish very nearly the same feelings towards the ocean with me.", - "input_from": 970, - "input_to": 1107, - "tone_categories": [ - { - "tones": [ - { - "score": 0.263035, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.203018, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.108853, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.135628, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.430709, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.605, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.176, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.315, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.041, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.721, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.707, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.953, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 8, - "text": "There now is your insular city of the Manhattoes, belted round by wharves as Indian isles by coral reefs-commerce surrounds it with her surf.", - "input_from": 1108, - "input_to": 1249, - "tone_categories": [ - { - "tones": [ - { - "score": 0.208645, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.505883, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.139235, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.123256, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.209934, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - 
"tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.591, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.655, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.587, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.5, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.115, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 9, - "text": "Right and left, the streets take you waterward.", - "input_from": 1250, - "input_to": 1297, - "tone_categories": [ - { - "tones": [ - { - "score": 0.296232, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.248731, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.249263, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.315715, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.234019, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.249, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.813, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.832, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.817, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.017, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 10, - "text": "Its extreme downtown is the battery, where that noble mole is washed by waves, and cooled by breezes, which a few hours previous were out of sight of land.", - "input_from": 1298, - "input_to": 1453, - "tone_categories": [ - { - "tones": [ - { - "score": 0.373581, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.556262, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.197002, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.108432, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.158906, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.778, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.484, - "tone_id": 
"conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.311, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.301, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.261, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 11, - "text": "Look at the crowds of water-gazers there.", - "input_from": 1454, - "input_to": 1495, - "tone_categories": [ - { - "tones": [ - { - "score": 0.098702, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.639292, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.2851, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.124082, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.294147, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.929, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.224, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.337, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.221, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.192, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 12, - "text": "Circumambulate the city of a dreamy Sabbath afternoon.", - "input_from": 1496, - "input_to": 1550, - "tone_categories": [ - { - "tones": [ - { - "score": 0.169689, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.206569, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.181326, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.247856, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.395501, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.975, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.932, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.388, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.137, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.18, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 13, - "text": "Go from Corlears Hook to Coenties Slip, and from thence, by Whitehall, 
northward.", - "input_from": 1551, - "input_to": 1632, - "tone_categories": [ - { - "tones": [ - { - "score": 0.207906, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.371378, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.280693, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.102245, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.416521, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.93, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.571, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.265, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.234, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.305, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 14, - "text": "What do you see?-Posted like silent sentinels all around the town, stand thousands upon thousands of mortal men fixed in ocean reveries.", - "input_from": 1633, - "input_to": 1769, - "tone_categories": [ - { - "tones": [ - { - "score": 0.262753, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.696676, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.194555, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.15851, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.270896, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.082, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.351, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.201, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.722, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.628, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.347, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 15, - "text": "Some leaning against the spiles; some seated upon the pier-heads; some looking over the bulwarks of ships from China; some high aloft in the rigging, as if striving to get a still better seaward peep.", - "input_from": 1770, - "input_to": 1970, - "tone_categories": [ - { - "tones": [ - { - "score": 0.382868, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.489318, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.205163, - "tone_id": 
"fear", - "tone_name": "Fear" - }, - { - "score": 0.118944, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.425947, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.135, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.767, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.954, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.691, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.157, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.226, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.243, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 16, - "text": "But these are all landsmen; of week days pent up in lath and plaster-tied to counters, nailed to benches, clinched to desks.", - "input_from": 1971, - "input_to": 2095, - "tone_categories": [ - { - "tones": [ - { - "score": 0.109781, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.348402, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.100454, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.439683, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.396121, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.493, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.871, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.405, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.274, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.258, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.671, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 17, - "text": "How then is this?", - "input_from": 2096, - "input_to": 2113, - "tone_categories": [ - { - "tones": [ - { - "score": 0.289338, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.487263, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.184789, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.060132, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.370277, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.847, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": 
"tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.31, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.132, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.157, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.335, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.954, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 18, - "text": "Are the green fields gone?", - "input_from": 2114, - "input_to": 2140, - "tone_categories": [ - { - "tones": [ - { - "score": 0.150856, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.364911, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.294397, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.153937, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.284773, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.418, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.897, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.157, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.342, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.102, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 19, - "text": "What do they here?", - "input_from": 2141, - "input_to": 2159, - "tone_categories": [ - { - "tones": [ - { - "score": 0.298403, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.484869, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.244632, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.119957, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.282312, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.06, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.19, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.93, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.985, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.261, - "tone_id": "emotional_range_big5", - "tone_name": 
"Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 20, - "text": "But look! here come more crowds, pacing straight for the water, and seemingly bound for a dive.", - "input_from": 2160, - "input_to": 2255, - "tone_categories": [ - { - "tones": [ - { - "score": 0.081729, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.366571, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.179309, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.336148, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.33228, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.459, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.38, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.891, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.286, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.371, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.222, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.528, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 21, - "text": "Strange!", - "input_from": 2256, - "input_to": 2264, - "tone_categories": [] - }, - { - "sentence_id": 22, - "text": "Nothing will content them but the extremest limit of the land; loitering under the shady lee of yonder warehouses will not suffice.", - "input_from": 2265, - "input_to": 2396, - "tone_categories": [ - { - "tones": [ - { - "score": 0.214428, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.400577, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.44209, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.079106, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.253806, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.721, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.202, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.274, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.214, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.671, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 23, - "text": "No.", - "input_from": 2397, - "input_to": 2400, - "tone_categories": [] - }, - { - "sentence_id": 24, - "text": "They must get just as 
nigh the water as they possibly can without falling in.", - "input_from": 2401, - "input_to": 2478, - "tone_categories": [ - { - "tones": [ - { - "score": 0.149916, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.438289, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.309294, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.103366, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.428773, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.451, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.556, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.076, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.546, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.511, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.826, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 25, - "text": "And there they stand-miles of them-leagues.", - "input_from": 2479, - "input_to": 2522, - "tone_categories": [ - { - "tones": [ - { - "score": 0.195838, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.609443, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.237532, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.218651, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.238227, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.498, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.201, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.812, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.902, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.23, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 26, - "text": "Inlanders all, they come from lanes and alleys, streets and avenues-north, east, south, and west.", - "input_from": 2523, - "input_to": 2620, - "tone_categories": [ - { - "tones": [ - { - "score": 0.20581, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.242848, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.156057, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.317132, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.284507, - "tone_id": "sadness", 
- "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.72, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.566, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.565, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.72, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.818, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.111, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 27, - "text": "Yet here they all unite.", - "input_from": 2621, - "input_to": 2645, - "tone_categories": [ - { - "tones": [ - { - "score": 0.221384, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.169253, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.151174, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.250741, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.430384, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.987, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.277, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.267, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.932, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.954, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.287, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 28, - "text": "Tell me, does the magnetic virtue of the needles of the compasses of all those ships attract them thither?", - "input_from": 2646, - "input_to": 2752, - "tone_categories": [ - { - "tones": [ - { - "score": 0.225617, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.571378, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.290246, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.164151, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.183171, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.597, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.465, - "tone_id": 
"openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.524, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.823, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.429, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.364, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 29, - "text": "Once more.", - "input_from": 2753, - "input_to": 2763, - "tone_categories": [] - }, - { - "sentence_id": 30, - "text": "Say you are in the country; in some high land of lakes.", - "input_from": 2764, - "input_to": 2819, - "tone_categories": [ - { - "tones": [ - { - "score": 0.141122, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.421809, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.361904, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.202674, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.359623, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.614, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.603, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.387, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.919, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.785, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.035, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 31, - "text": "Take almost any path you please, and ten to one it carries you down in a dale, and leaves you there by a pool in the stream.", - "input_from": 2820, - "input_to": 2944, - "tone_categories": [ - { - "tones": [ - { - "score": 0.279927, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.343172, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.36336, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.149495, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.305648, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.733, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.412, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.649, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.748, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.876, - "tone_id": "agreeableness_big5", - "tone_name": 
"Agreeableness" - }, - { - "score": 0.03, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 32, - "text": "There is magic in it.", - "input_from": 2945, - "input_to": 2966, - "tone_categories": [ - { - "tones": [ - { - "score": 0.151178, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.362646, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.405931, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.202287, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.323321, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.58, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.153, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.393, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.743, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.511, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 33, - "text": "Let the most absent-minded of men be plunged in his deepest reveries-stand that man on his legs, set his feet a-going, and he will infallibly lead you to water, if water there be in all that region.", - "input_from": 2967, - "input_to": 3165, - "tone_categories": [ - { - "tones": [ - { - "score": 0.073153, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.722682, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.45649, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.065335, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.408581, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.114, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.255, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.459, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.229, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.9, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.784, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.309, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 34, - "text": "Should you ever be athirst in the great American desert, try this experiment, if your caravan happen to be supplied with a metaphysical professor.", - "input_from": 3166, 
- "input_to": 3312, - "tone_categories": [ - { - "tones": [ - { - "score": 0.252295, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.52585, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.234371, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.112877, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.175748, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.275, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.199, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.661, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.392, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.591, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.412, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.389, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 35, - "text": "Yes, as every one knows, meditation and water are wedded for ever.", - "input_from": 3313, - "input_to": 3379, - "tone_categories": [ - { - "tones": [ - { - "score": 0.174186, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.248523, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.148391, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.25751, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.475705, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.675, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.786, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.749, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.316, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.261, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.133, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.636, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 36, - "text": "But here is an artist.", - "input_from": 3380, - "input_to": 3402, - "tone_categories": [ - { - "tones": [ - { - "score": 0.188722, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.138485, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.171406, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.293563, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.528097, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - 
"tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.615, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.03, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.393, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.552, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.844, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 37, - "text": "He desires to paint you the dreamiest, shadiest, quietest, most enchanting bit of romantic landscape in all the valley of the Saco.", - "input_from": 3403, - "input_to": 3534, - "tone_categories": [ - { - "tones": [ - { - "score": 0.115039, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.136932, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.228761, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.323535, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.433443, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.493, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.735, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.761, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.804, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.545, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.136, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 38, - "text": "What is the chief element he employs?", - "input_from": 3535, - "input_to": 3572, - "tone_categories": [ - { - "tones": [ - { - "score": 0.398249, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.351877, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.410105, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.088988, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.129349, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.372, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.519, - "tone_id": 
"conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.351, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.423, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.278, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 39, - "text": "There stand his trees, each with a hollow trunk, as if a hermit and a crucifix were within; and here sleeps his meadow, and there sleep his cattle; and up from yonder cottage goes a sleepy smoke.", - "input_from": 3573, - "input_to": 3768, - "tone_categories": [ - { - "tones": [ - { - "score": 0.265136, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.796105, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.075884, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.126968, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.210043, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.114, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.601, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.518, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.753, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.872, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.214, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 40, - "text": "Deep into distant woodlands winds a mazy way, reaching to overlapping spurs of mountains bathed in their hill-side blue.", - "input_from": 3769, - "input_to": 3889, - "tone_categories": [ - { - "tones": [ - { - "score": 0.118054, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.375256, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.54878, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.12193, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.235122, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.773, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.711, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.6, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.578, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.185, - "tone_id": "emotional_range_big5", - 
"tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 41, - "text": "But though the picture lies thus tranced, and though this pine-tree shakes down its sighs like leaves upon this shepherd's head, yet all were vain, unless the shepherd's eye were fixed upon the magic stream before him.", - "input_from": 3890, - "input_to": 4108, - "tone_categories": [ - { - "tones": [ - { - "score": 0.441053, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.262616, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.138243, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.023707, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.530394, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.273, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.11, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.43, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.166, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.771, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.595, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.739, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 42, - "text": "Go visit the Prairies in June, when for scores on scores of miles you wade knee-deep among Tiger-lilies-what is the one charm wanting?-Water-there is not a drop of water there!", - "input_from": 4109, - "input_to": 4285, - "tone_categories": [ - { - "tones": [ - { - "score": 0.37904, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.175941, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.260338, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.402414, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.162226, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.528, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.558, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.567, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.67, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.148, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 43, - "text": "Were Niagara but a cataract of sand, would you travel your thousand miles to 
see it?", - "input_from": 4286, - "input_to": 4370, - "tone_categories": [ - { - "tones": [ - { - "score": 0.033412, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.429261, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.434537, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.380345, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.262543, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.139, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.067, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.756, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.768, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.568, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 44, - "text": "Why did the poor poet of Tennessee, upon suddenly receiving two handfuls of silver, deliberate whether to buy him a coat, which he sadly needed, or invest his money in a pedestrian trip to Rockaway Beach?", - "input_from": 4371, - "input_to": 4575, - "tone_categories": [ - { - "tones": [ - { - "score": 0.267462, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.61645, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.060459, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.083649, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.46912, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.019, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.065, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.641, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.446, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.433, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.277, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.39, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 45, - "text": "Why is almost every robust healthy boy with a robust healthy soul in him, at some time or other crazy to go to sea?", - "input_from": 4576, - "input_to": 4691, - "tone_categories": [ - { - "tones": [ - { - "score": 0.222434, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.151632, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.146582, - "tone_id": "fear", - 
"tone_name": "Fear" - }, - { - "score": 0.199168, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.371779, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.614, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.671, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.173, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.692, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.379, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.772, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 46, - "text": "Why upon your first voyage as a passenger, did you yourself feel such a mystical vibration, when first told that you and your ship were now out of sight of land?", - "input_from": 4692, - "input_to": 4853, - "tone_categories": [ - { - "tones": [ - { - "score": 0.289226, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.403452, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.388116, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.148897, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.177373, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.099, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.175, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.446, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.872, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.875, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.031, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 47, - "text": "Why did the old Persians hold the sea holy?", - "input_from": 4854, - "input_to": 4897, - "tone_categories": [ - { - "tones": [ - { - "score": 0.156871, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.440361, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.372559, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.076162, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.261716, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" 
- }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.531, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.286, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.138, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.128, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.879, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 48, - "text": "Why did the Greeks give it a separate deity, and own brother of Jove?", - "input_from": 4898, - "input_to": 4967, - "tone_categories": [ - { - "tones": [ - { - "score": 0.372514, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.425748, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.326713, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.097709, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.306402, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.652, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.508, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.517, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.384, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.498, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 49, - "text": "Surely all this is not without meaning.", - "input_from": 4968, - "input_to": 5007, - "tone_categories": [ - { - "tones": [ - { - "score": 0.237539, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.227237, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.376581, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.069574, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.540447, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.886, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.997, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.401, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.001, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.832, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.376, - "tone_id": "agreeableness_big5", - 
"tone_name": "Agreeableness" - }, - { - "score": 0.992, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 50, - "text": "And still deeper the meaning of that story of Narcissus, who because he could not grasp the tormenting, mild image he saw in the fountain, plunged into it and was drowned.", - "input_from": 5008, - "input_to": 5179, - "tone_categories": [ - { - "tones": [ - { - "score": 0.079256, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.671545, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.535755, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.023619, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.420106, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.732, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.449, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.708, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.133, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.591, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.445, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.739, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 51, - "text": "But that same image, we ourselves see in all rivers and oceans.", - "input_from": 5180, - "input_to": 5243, - "tone_categories": [ - { - "tones": [ - { - "score": 0.066081, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.227092, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.2573, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.269542, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.654919, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.786, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.128, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.016, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.935, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.954, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.961, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 52, - "text": "It is the image of the ungraspable phantom of life; and this is the key to it all.", - "input_from": 5244, - "input_to": 5326, - 
"tone_categories": [ - { - "tones": [ - { - "score": 0.046707, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.140698, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.645008, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.165148, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.333413, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.6, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.956, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.789, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.367, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.193, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.238, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 53, - "text": "Now, when I say that I am in the habit of going to sea whenever I begin to grow hazy about the eyes, and begin to be over conscious of my lungs, I do not mean to have it inferred that I ever go to sea as a passenger.", - "input_from": 5327, - "input_to": 5543, - "tone_categories": [ - { - "tones": [ - { - "score": 0.405999, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.206239, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.216264, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.299023, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.319107, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.275, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.196, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.281, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.405, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.476, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.52, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.853, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 54, - "text": "For to go as a passenger you must needs have a purse, and a purse is but a rag unless you have something in it.", - "input_from": 5544, - "input_to": 5655, - "tone_categories": [ - { - "tones": [ - { - "score": 0.242366, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.313293, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.391356, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.202589, - 
"tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.276341, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.053, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.442, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.16, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.627, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.465, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.598, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 55, - "text": "Besides, passengers get sea-sick-grow quarrelsome-don't sleep of nights-do not enjoy themselves much, as a general thing;-no, I never go as a passenger; nor, though I am something of a salt, do I ever go to sea as a Commodore, or a Captain, or a Cook.", - "input_from": 5656, - "input_to": 5907, - "tone_categories": [ - { - "tones": [ - { - "score": 0.393608, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.510843, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.296177, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.071568, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.302687, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.066, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.772, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.436, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.229, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.349, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.313, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.826, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 56, - "text": "I abandon the glory and distinction of such offices to those who like them.", - "input_from": 5908, - "input_to": 5983, - "tone_categories": [ - { - "tones": [ - { - "score": 0.179585, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.479747, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.424013, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.246049, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.1726, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, 
- { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.289, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.153, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.276, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.762, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.847, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.888, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 57, - "text": "For my part, I abominate all honourable respectable toils, trials, and tribulations of every kind whatsoever.", - "input_from": 5984, - "input_to": 6093, - "tone_categories": [ - { - "tones": [ - { - "score": 0.357501, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.34783, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.29798, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.095727, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.332466, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.847, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.352, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.402, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.368, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.441, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.932, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 58, - "text": "It is quite as much as I can do to take care of myself, without taking care of ships, barques, brigs, schooners, and what not.", - "input_from": 6094, - "input_to": 6220, - "tone_categories": [ - { - "tones": [ - { - "score": 0.311786, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.246754, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.205102, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.310913, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.413132, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.223, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.179, - "tone_id": 
"conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.352, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.507, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.909, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 59, - "text": "And as for going as cook,-though I confess there is considerable glory in that, a cook being a sort of officer on ship-board-yet, somehow, I never fancied broiling fowls;-though once broiled, judiciously buttered, and judgmatically salted and peppered, there is no one who will speak more respectfully, not to say reverentially, of a broiled fowl than I will.", - "input_from": 6221, - "input_to": 6580, - "tone_categories": [ - { - "tones": [ - { - "score": 0.366891, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.435328, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.256416, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.037071, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.473926, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.487, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.25, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.564, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.453, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.73, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 60, - "text": "It is out of the idolatrous dotings of the old Egyptians upon broiled ibis and roasted river horse, that you see the mummies of those creatures in their huge bake-houses the pyramids.", - "input_from": 6581, - "input_to": 6764, - "tone_categories": [ - { - "tones": [ - { - "score": 0.068849, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.670484, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.565229, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.072999, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.194397, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.713, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.521, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.694, 
- "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.546, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.136, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 61, - "text": "No, when I go to sea, I go as a simple sailor, right before the mast, plumb down into the forecastle, aloft there to the royal mast-head.", - "input_from": 6765, - "input_to": 6902, - "tone_categories": [ - { - "tones": [ - { - "score": 0.202455, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.368053, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.487951, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.116903, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.240054, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.63, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.761, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.23, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.425, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.493, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 62, - "text": "True, they rather order me about some, and make me jump from spar to spar, like a grasshopper in a May meadow.", - "input_from": 6903, - "input_to": 7013, - "tone_categories": [ - { - "tones": [ - { - "score": 0.114608, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.180568, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.127396, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.552524, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.21591, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.913, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.392, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.35, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.573, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.659, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.814, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 63, - "text": "And 
at first, this sort of thing is unpleasant enough.", - "input_from": 7014, - "input_to": 7068, - "tone_categories": [ - { - "tones": [ - { - "score": 0.172106, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.323635, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.405992, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.168903, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.287174, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.715, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.885, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.218, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.059, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.19, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.894, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 64, - "text": "It touches one's sense of honour, particularly if you come of an old established family in the land, the Van Rensselaers, or Randolphs, or Hardicanutes.", - "input_from": 7069, - "input_to": 7221, - "tone_categories": [ - { - "tones": [ - { - "score": 0.245789, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.355261, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.290643, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.112017, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.277867, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.866, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.571, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.495, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.291, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.557, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.608, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.671, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 65, - "text": "And more than all, if just previous to putting your hand into the tar-pot, you have been lording it as a country schoolmaster, making the tallest boys stand in awe of you.", - "input_from": 7222, - "input_to": 7393, - "tone_categories": [ - { - "tones": [ - { - "score": 0.107358, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.727051, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - 
"score": 0.242276, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.116783, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.176322, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.155, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.08, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.612, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.35, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.806, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.569, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.213, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 66, - "text": "The transition is a keen one, I assure you, from a schoolmaster to a sailor, and requires a strong decoction of Seneca and the Stoics to enable you to grin and bear it.", - "input_from": 7394, - "input_to": 7562, - "tone_categories": [ - { - "tones": [ - { - "score": 0.381463, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.493127, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.524356, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.061184, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.233586, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.155, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.298, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.583, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.916, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.746, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.729, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.109, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 67, - "text": "But even this wears off in time.", - "input_from": 7563, - "input_to": 7595, - "tone_categories": [ - { - "tones": [ - { - "score": 0.15969, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.501692, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.184826, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.114453, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.347395, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - 
"tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.841, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.603, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.01, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.297, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.847, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.97, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 68, - "text": "What of it, if some old hunks of a sea-captain orders me to get a broom and sweep down the decks?", - "input_from": 7596, - "input_to": 7693, - "tone_categories": [ - { - "tones": [ - { - "score": 0.349233, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.150259, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.448867, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.14003, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.233611, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.364, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.284, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.791, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.498, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.153, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.275, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.685, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 69, - "text": "What does that indignity amount to, weighed, I mean, in the scales of the New Testament?", - "input_from": 7694, - "input_to": 7782, - "tone_categories": [ - { - "tones": [ - { - "score": 0.114981, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.341251, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.232329, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.372385, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.282898, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.815, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.571, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.329, - 
"tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.18, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.68, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 70, - "text": "Do you think the archangel Gabriel thinks anything the less of me, because I promptly and respectfully obey that old hunks in that particular instance?", - "input_from": 7783, - "input_to": 7934, - "tone_categories": [ - { - "tones": [ - { - "score": 0.117679, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.425065, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.606104, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.040868, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.459945, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.821, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.196, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.326, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.266, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.39, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.412, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.846, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 71, - "text": "Who ain't a slave?", - "input_from": 7935, - "input_to": 7953, - "tone_categories": [ - { - "tones": [ - { - "score": 0.243072, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.332116, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.450842, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.11269, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.202439, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.9, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.932, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.606, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.194, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.061, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 72, - "text": "Tell me that.", - "input_from": 7954, - "input_to": 7967, - "tone_categories": [] 
- }, - { - "sentence_id": 73, - "text": "Well, then, however the old sea-captains may order me about-however they may thump and punch me about, I have the satisfaction of knowing that it is all right; that everybody else is one way or other served in much the same way-either in a physical or metaphysical point of view, that is; and so the universal thump is passed round, and all hands should rub each other's shoulder-blades, and be content.", - "input_from": 7968, - "input_to": 8371, - "tone_categories": [ - { - "tones": [ - { - "score": 0.600225, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.188614, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.342122, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.051428, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.309914, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.296, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.005, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.566, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.261, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.455, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.372, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.792, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 74, - "text": "Again, I always go to sea as a sailor, because they make a point of paying me for my trouble, whereas they never pay passengers a single penny that I ever heard of.", - "input_from": 8372, - "input_to": 8536, - "tone_categories": [ - { - "tones": [ - { - "score": 0.399134, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.426051, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.191353, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.133474, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.336895, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.69, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.659, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.195, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.23, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.499, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.371, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.895, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 75, - "text": "On the 
contrary, passengers themselves must pay.", - "input_from": 8537, - "input_to": 8585, - "tone_categories": [ - { - "tones": [ - { - "score": 0.176823, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.441884, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.245443, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.135024, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.265692, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.984, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.723, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.242, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.433, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.501, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.655, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.155, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 76, - "text": "And there is all the difference in the world between paying and being paid.", - "input_from": 8586, - "input_to": 8661, - "tone_categories": [ - { - "tones": [ - { - "score": 0.20078, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.215978, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.097787, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.302586, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.483807, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.723, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.967, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.475, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.14, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.392, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.224, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 77, - "text": "The act of paying is perhaps the most uncomfortable infliction that the two orchard thieves entailed upon us.", - "input_from": 8662, - "input_to": 8771, - "tone_categories": [ - { - "tones": [ - { - "score": 0.272819, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.637609, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.45609, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.038838, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - 
"score": 0.143311, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.346, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.667, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.494, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.286, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.288, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.525, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 78, - "text": "But BEING PAID,-what will compare with it?", - "input_from": 8772, - "input_to": 8814, - "tone_categories": [ - { - "tones": [ - { - "score": 0.129291, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.168215, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.505291, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.172874, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.445413, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.978, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.525, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.027, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.07, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.214, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.957, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 79, - "text": "The urbane activity with which a man receives money is really marvellous, considering that we so earnestly believe money to be the root of all earthly ills, and that on no account can a monied man enter heaven.", - "input_from": 8815, - "input_to": 9025, - "tone_categories": [ - { - "tones": [ - { - "score": 0.258157, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.362913, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.209787, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.224406, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.206747, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - 
"tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.623, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.284, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.637, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.265, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.563, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 80, - "text": "Ah! how cheerfully we consign ourselves to perdition!", - "input_from": 9026, - "input_to": 9079, - "tone_categories": [ - { - "tones": [ - { - "score": 0.326175, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.279526, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.280562, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.081405, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.159875, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.031, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.18, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.959, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.979, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.862, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 81, - "text": "Finally, I always go to sea as a sailor, because of the wholesome exercise and pure air of the fore-castle deck.", - "input_from": 9080, - "input_to": 9192, - "tone_categories": [ - { - "tones": [ - { - "score": 0.10393, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.110797, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.194602, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.691458, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.203747, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.563, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.543, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.775, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.663, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.283, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.19, - "tone_id": "agreeableness_big5", - 
"tone_name": "Agreeableness" - }, - { - "score": 0.655, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 82, - "text": "For as in this world, head winds are far more prevalent than winds from astern (that is, if you never violate the Pythagorean maxim), so for the most part the Commodore on the quarter-deck gets his atmosphere at second hand from the sailors on the forecastle.", - "input_from": 9193, - "input_to": 9452, - "tone_categories": [ - { - "tones": [ - { - "score": 0.190926, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.563901, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.379399, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.039081, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.386472, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.066, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.167, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.835, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.569, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.551, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.391, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.166, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 83, - "text": "He thinks he breathes it first; but not so.", - "input_from": 9453, - "input_to": 9496, - "tone_categories": [ - { - "tones": [ - { - "score": 0.146976, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.264925, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.429378, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.140581, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.474616, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.779, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.032, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.035, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.763, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.927, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.925, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 84, - "text": "In much the same way do the commonalty lead their leaders in many 
other things, at the same time that the leaders little suspect it.", - "input_from": 9497, - "input_to": 9629, - "tone_categories": [ - { - "tones": [ - { - "score": 0.223986, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.530082, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.343833, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.063121, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.278164, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.257, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.571, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.899, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.752, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.411, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.255, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.23, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 85, - "text": "But wherefore it was that after having repeatedly smelt the sea as a merchant sailor, I should now take it into my head to go on a whaling voyage; this the invisible police officer of the Fates, who has the constant surveillance of me, and secretly dogs me, and influences me in some unaccountable way-he can better answer than any one else.", - "input_from": 9630, - "input_to": 9971, - "tone_categories": [ - { - "tones": [ - { - "score": 0.421184, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.707532, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.530845, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.014768, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.092379, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.688, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.008, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.506, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.429, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.239, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.422, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.808, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 86, - "text": "And, doubtless, my going on this whaling voyage, formed part of the grand programme of Providence that was drawn up a long time ago.", - "input_from": 9972, - "input_to": 10104, - "tone_categories": [ - { - 
"tones": [ - { - "score": 0.259643, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.433288, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.221091, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.234673, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.23225, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.199, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.509, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.569, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.443, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.581, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.565, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 87, - "text": "It came in as a sort of brief interlude and solo between more extensive performances.", - "input_from": 10105, - "input_to": 10190, - "tone_categories": [ - { - "tones": [ - { - "score": 0.220237, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.488841, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.346516, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.32806, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.174339, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.451, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.922, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.534, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.24, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.432, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.536, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 88, - "text": "I take it that this part of the bill must have run something like this:\n\"GRAND CONTESTED ELECTION FOR THE PRESIDENCY OF THE UNITED STATES.", - "input_from": 10191, - "input_to": 10329, - "tone_categories": [ - { - "tones": [ - { - "score": 0.118164, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.52867, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.454139, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.167206, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.198501, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - 
"category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.065, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.374, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.345, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.446, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.536, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.739, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 89, - "text": "\"WHALING VOYAGE BY ONE ISHMAEL.", - "input_from": 10330, - "input_to": 10361, - "tone_categories": [ - { - "tones": [ - { - "score": 0.242882, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.212707, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.251869, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.217312, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.219939, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.912, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.571, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.035, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.119, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.401, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 90, - "text": "\"BLOODY BATTLE IN AFFGHANISTAN.\"", - "input_from": 10362, - "input_to": 10394, - "tone_categories": [ - { - "tones": [ - { - "score": 0.467411, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.387246, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.297422, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.040942, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.214117, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.981, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.571, - "tone_id": 
"conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.89, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.743, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.401, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 91, - "text": "Though I cannot tell why it was exactly that those stage managers, the Fates, put me down for this shabby part of a whaling voyage, when others were set down for magnificent parts in high tragedies, and short and easy parts in genteel comedies, and jolly parts in farces-though I cannot tell why this was exactly; yet, now that I recall all the circumstances, I think I can see a little into the springs and motives which being cunningly presented to me under various disguises, induced me to set about performing the part I did, besides cajoling me into the delusion that it was a choice resulting from my own unbiased freewill and discriminating judgment.", - "input_from": 10395, - "input_to": 11052, - "tone_categories": [ - { - "tones": [ - { - "score": 0.530573, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.305188, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.287743, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.049307, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.240543, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.255, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.167, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.438, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.321, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.433, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.506, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.827, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 92, - "text": "Chief among these motives was the overwhelming idea of the great whale himself.", - "input_from": 11053, - "input_to": 11132, - "tone_categories": [ - { - "tones": [ - { - "score": 0.07347, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.348998, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.403245, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.326948, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.136145, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ 
- { - "score": 0.768, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.22, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.265, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.218, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.833, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 93, - "text": "Such a portentous and mysterious monster roused all my curiosity.", - "input_from": 11133, - "input_to": 11198, - "tone_categories": [ - { - "tones": [ - { - "score": 0.287072, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.045712, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.428996, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.280744, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.139453, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.879, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.669, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.556, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.409, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.235, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.78, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 94, - "text": "Then the wild and distant seas where he rolled his island bulk; the undeliverable, nameless perils of the whale; these, with all the attending marvels of a thousand Patagonian sights and sounds, helped to sway me to my wish.", - "input_from": 11199, - "input_to": 11423, - "tone_categories": [ - { - "tones": [ - { - "score": 0.201321, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.413132, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.328442, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.040351, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.432223, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.014, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.235, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.601, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.517, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.566, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.531, - "tone_id": 
"agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.385, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 95, - "text": "With other men, perhaps, such things would not have been inducements; but as for me, I am tormented with an everlasting itch for things remote.", - "input_from": 11424, - "input_to": 11567, - "tone_categories": [ - { - "tones": [ - { - "score": 0.47654, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.418817, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.133762, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.08858, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.419193, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.257, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0.196, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.447, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.057, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.497, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.378, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.956, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 96, - "text": "I love to sail forbidden seas, and land on barbarous coasts.", - "input_from": 11568, - "input_to": 11628, - "tone_categories": [ - { - "tones": [ - { - "score": 0.099477, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.164791, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.149077, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.425919, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.384697, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.232, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.295, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.829, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.739, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.876, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 97, - "text": "Not ignoring what is good, I am quick to perceive a horror, and could still be social with it-would they let me-since it is but well to be on 
friendly terms with all the inmates of the place one lodges in.", - "input_from": 11629, - "input_to": 11834, - "tone_categories": [ - { - "tones": [ - { - "score": 0.267446, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.220281, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.345987, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.061857, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.226209, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.591, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.184, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.514, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.39, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.825, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - }, - { - "sentence_id": 98, - "text": "By reason of these things, then, the whaling voyage was welcome; the great flood-gates of the wonder-world swung open, and in the wild conceits that swayed me to my purpose, two and two there floated into my inmost soul, endless processions of the whale, and, mid most of them all, one grand hooded phantom, like a snow hill in the air.", - "input_from": 11835, - "input_to": 12171, - "tone_categories": [ - { - "tones": [ - { - "score": 0.096855, - "tone_id": "anger", - "tone_name": "Anger" - }, - { - "score": 0.111949, - "tone_id": "disgust", - "tone_name": "Disgust" - }, - { - "score": 0.630888, - "tone_id": "fear", - "tone_name": "Fear" - }, - { - "score": 0.172567, - "tone_id": "joy", - "tone_name": "Joy" - }, - { - "score": 0.180281, - "tone_id": "sadness", - "tone_name": "Sadness" - } - ], - "category_id": "emotion_tone", - "category_name": "Emotion Tone" - }, - { - "tones": [ - { - "score": 0.275, - "tone_id": "analytical", - "tone_name": "Analytical" - }, - { - "score": 0.031, - "tone_id": "confident", - "tone_name": "Confident" - }, - { - "score": 0, - "tone_id": "tentative", - "tone_name": "Tentative" - } - ], - "category_id": "language_tone", - "category_name": "Language Tone" - }, - { - "tones": [ - { - "score": 0.711, - "tone_id": "openness_big5", - "tone_name": "Openness" - }, - { - "score": 0.498, - "tone_id": "conscientiousness_big5", - "tone_name": "Conscientiousness" - }, - { - "score": 0.427, - "tone_id": "extraversion_big5", - "tone_name": "Extraversion" - }, - { - "score": 0.432, - "tone_id": "agreeableness_big5", - "tone_name": "Agreeableness" - }, - { - "score": 0.662, - "tone_id": "emotional_range_big5", - "tone_name": "Emotional Range" - } - ], - "category_id": "social_tone", - "category_name": "Social Tone" - } - ] - } - ] -} \ No newline at end of file diff --git a/resources/tone-v3-expect2.json b/resources/tone-v3-expect2.json deleted file mode 100644 index bbd93ad1d..000000000 --- a/resources/tone-v3-expect2.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "utterances_tone": 
[ - { - "utterance_id": 0, - "utterance_text": "I am very happy", - "tones": [ - { - "score": 0.875529, - "tone_id": "polite", - "tone_name": "polite" - }, - { - "score": 0.838693, - "tone_id": "satisfied", - "tone_name": "satisfied" - }, - { - "score": 0.844135, - "tone_id": "sympathetic", - "tone_name": "sympathetic" - }, - { - "score": 0.916255, - "tone_id": "excited", - "tone_name": "excited" - } - ] - } - ] -} diff --git a/resources/tts_audio.wav b/resources/tts_audio.wav new file mode 100644 index 000000000..ba4760649 Binary files /dev/null and b/resources/tts_audio.wav differ diff --git a/setup.py b/setup.py index 6cb73f07b..6947cb83d 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# Copyright 2016 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2015, 2025. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,78 +13,41 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function from setuptools import setup -from setuptools.command.test import test as TestCommand -import os -import sys +from os import path -__version__ = '3.0.4' - -if sys.argv[-1] == 'publish': - # test server - os.system('python setup.py register -r pypitest') - os.system('python setup.py sdist upload -r pypitest') - - # production server - os.system('python setup.py register -r pypi') - os.system('python setup.py sdist upload -r pypi') - sys.exit() - -# Convert README.md to README.rst for pypi -try: - from pypandoc import convert_file - - def read_md(f): - return convert_file(f, 'rst') - - # read_md = lambda f: convert(f, 'rst') -except: - print('warning: pypandoc module not found, ' - 'could not convert Markdown to RST') - - def read_md(f): - return open(f, 'rb').read().decode(encoding='utf-8') - # read_md = lambda f: open(f, 'rb').read().decode(encoding='utf-8') - - -class PyTest(TestCommand): - def finalize_options(self): - TestCommand.finalize_options(self) - self.test_args = ['--strict', '--verbose', '--tb=long', 'test'] - self.test_suite = True - - def run_tests(self): - import pytest - errcode = pytest.main(self.test_args) - sys.exit(errcode) +__version__ = '11.2.0' +# read contents of README file +this_directory = path.abspath(path.dirname(__file__)) +with open(path.join(this_directory, 'README.md'), encoding='utf-8') as file: + readme_file = file.read() setup(name='ibm-watson', version=__version__, description='Client library to use the IBM Watson Services', + packages=['ibm_watson'], + install_requires=['requests>=2.0, <3.0', 'python_dateutil>=2.5.3', 'websocket-client>=1.1.0', 'ibm_cloud_sdk_core>=3.3.6, == 3.*'], + tests_require=['responses', 'pytest', 'python_dotenv', 'pytest-rerunfailures'], license='Apache 2.0', - install_requires=['requests>=2.0, <3.0', 'python_dateutil>=2.5.3', 'websocket-client==0.48.0', 'ibm_cloud_sdk_core>=0.2.0'], - tests_require=['responses', 'pytest', 'python_dotenv', 'pytest-rerunfailures', 'tox'], - cmdclass={'test': PyTest}, author='IBM Watson', author_email='watdevex@us.ibm.com', - long_description=read_md('README.md'), + long_description=readme_file, + long_description_content_type='text/markdown', url='https://github.com/watson-developer-cloud/python-sdk', - packages=['ibm_watson'], include_package_data=True, - keywords='language, vision, question and answer' + - ' tone_analyzer, natural language classifier,' + - ' text to speech, language translation, ' + + 
keywords='language, question and answer,' + + ' tone_analyzer,' + + ' text to speech,' + 'language identification, concept expansion, machine translation, ' + - 'personality insights, message resonance, watson developer cloud, ' + + 'message resonance, watson developer cloud, ' + ' wdc, watson, ibm, dialog, user modeling,' + - 'tone analyzer, speech to text, visual recognition', + 'speech to text', classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', - 'Development Status :: 4 - Beta', + 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', diff --git a/test/integration/__init__.py b/test/integration/__init__.py index 161119efe..949039f3b 100644 --- a/test/integration/__init__.py +++ b/test/integration/__init__.py @@ -1,5 +1,4 @@ # coding: utf-8 -from __future__ import print_function from dotenv import load_dotenv, find_dotenv # load the .env file containing your environment variables for the required diff --git a/test/integration/test_assistant_v1.py b/test/integration/test_assistant_v1.py new file mode 100644 index 000000000..306e5d489 --- /dev/null +++ b/test/integration/test_assistant_v1.py @@ -0,0 +1,117 @@ +# coding: utf-8 +from unittest import TestCase +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator, BearerTokenAuthenticator +from os.path import abspath +import os +import ibm_watson +import pytest +import json + +@pytest.mark.skipif(os.getenv('ASSISTANT_APIKEY') is None, + reason='requires ASSISTANT_APIKEY') +class TestAssistantV1(TestCase): + + @classmethod + def setup_class(cls): + + create_workspace_data = { + "name": + "test_workspace", + "description": + "integration tests", + "language": + "en", + "intents": [{ + "intent": "hello", + "description": "string", + "examples": [{ + "text": "good morning" + }] + }], + "entities": [{ + "entity": "pizza_toppings", + "description": "Tasty pizza toppings", + "metadata": { + "property": "value" + } + }], + "counterexamples": [{ + "text": "string" + }], + "metadata": {}, + } + + authenticator = IAMAuthenticator(os.getenv('ASSISTANT_APIKEY')) + cls.assistant = ibm_watson.AssistantV1( + version='2018-07-10', + authenticator=authenticator + ) + cls.assistant.set_default_headers({ + 'X-Watson-Learning-Opt-Out': '1', + 'X-Watson-Test': '1' + }) + + response = cls.assistant.create_workspace( + name=create_workspace_data['name'], + description=create_workspace_data['description'], + language='en', + intents=create_workspace_data['intents'], + entities=create_workspace_data['entities'], + counterexamples=create_workspace_data['counterexamples'], + metadata=create_workspace_data['metadata']).get_result() + + cls.workspace_id = response['workspace_id'] + + examples = [{"text": "good morning"}] + response = cls.assistant.create_intent( + workspace_id=cls.workspace_id, + intent='test_intent', + description='Test intent.', + examples=examples).get_result() + + @classmethod + def teardown_class(cls): + response = cls.assistant.delete_intent(workspace_id=cls.workspace_id, intent='updated_test_intent').get_result() + assert response is not None + + response = cls.assistant.delete_workspace(cls.workspace_id).get_result() + assert response is not None + + def test_workspace(self): + response = self.assistant.get_workspace(self.workspace_id, export=True).get_result() + assert response is not None + + response = 
self.assistant.list_workspaces().get_result() + assert response is not None + print(json.dumps(response, indent=2)) + + response = self.assistant.message(self.workspace_id, + input={ + 'text': 'What\'s the weather like?' + }, + context={ + 'metadata': { + 'deployment': 'myDeployment' + } + }).get_result() + assert response is not None + + response = self.assistant.update_workspace(workspace_id=self.workspace_id, description='Updated test workspace.').get_result() + assert response is not None + + def test_intent(self): + response = self.assistant.get_intent( + workspace_id=self.workspace_id, intent='test_intent', export=True).get_result() + assert response is not None + + response = self.assistant.update_intent( + workspace_id=self.workspace_id, + intent='test_intent', + new_intent='updated_test_intent', + new_description='Updated test intent.').get_result() + assert response is not None + + response = self.assistant.list_intents( + workspace_id=self.workspace_id, export=True).get_result() + assert response is not None + print(json.dumps(response, indent=2)) diff --git a/test/integration/test_assistant_v2.py b/test/integration/test_assistant_v2.py new file mode 100644 index 000000000..db17bcae0 --- /dev/null +++ b/test/integration/test_assistant_v2.py @@ -0,0 +1,65 @@ +# coding: utf-8 + +# Copyright 2019, 2024 IBM All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
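The AssistantV1 integration test above builds its client from an explicit IAMAuthenticator rather than VCAP_SERVICES. A minimal sketch of that construction pattern, assuming only the ASSISTANT_APIKEY environment variable the test reads (the version string and default headers are copied from the test itself):

import os
import ibm_watson
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Build the client the same way the test's setup_class does; the API key comes from the environment.
authenticator = IAMAuthenticator(os.getenv('ASSISTANT_APIKEY'))
assistant = ibm_watson.AssistantV1(version='2018-07-10', authenticator=authenticator)
assistant.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'})

# Each SDK call returns a DetailedResponse; get_result() unwraps the JSON body.
workspaces = assistant.list_workspaces().get_result()
print(workspaces)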
+ +from unittest import TestCase +import ibm_watson +from ibm_watson.assistant_v2 import MessageInput +from ibm_watson.common import parse_sse_stream_data +import pytest +import json +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator + +class TestAssistantV2(TestCase): + + def setUp(self): + + with open('./auth.json') as f: + data = json.load(f) + assistant_auth = data.get("assistantv2") + self.assistant_id = assistant_auth.get("assistantId") + self.environment_id = assistant_auth.get("environmentId") + + self.authenticator = IAMAuthenticator(apikey=assistant_auth.get("apikey")) + self.assistant = ibm_watson.AssistantV2(version='2024-08-25', authenticator=self.authenticator) + self.assistant.set_service_url(assistant_auth.get("serviceUrl")) + self.assistant.set_default_headers({ + 'X-Watson-Learning-Opt-Out': '1', + 'X-Watson-Test': '1' + }) + + def test_list_assistants(self): + response = self.assistant.list_assistants().get_result() + assert response is not None + + def test_message_stream_stateless(self): + input = MessageInput(message_type="text", text="can you list the steps to create a custom extension?") + user_id = "Angelo" + + response = self.assistant.message_stream_stateless(self.assistant_id, self.environment_id, input=input, user_id=user_id).get_result() + + for data in parse_sse_stream_data(response): + # One of these items must exist + # assert "partial_item" in data_json or "complete_item" in data_json or "final_item" in data_json + + if "partial_item" in data: + assert data["partial_item"]["text"] is not None + elif "complete_item" in data: + assert data["complete_item"]["text"] is not None + elif "final_response" in data: + assert data["final_response"] is not None + else: + pytest.fail("Should be impossible to get here") + diff --git a/test/integration/test_compare_comply_v1.py b/test/integration/test_compare_comply_v1.py deleted file mode 100644 index 2683c42c3..000000000 --- a/test/integration/test_compare_comply_v1.py +++ /dev/null @@ -1,167 +0,0 @@ -# coding: utf-8 -import pytest -import ibm_watson -import os -from os.path import abspath -from unittest import TestCase - -@pytest.mark.skipif( - os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES') -class IntegrationTestCompareComplyV1(TestCase): - compare_comply = None - - @classmethod - def setup_class(cls): - cls.compare_comply = ibm_watson.CompareComplyV1( - '2018-10-15') - cls.compare_comply.set_default_headers({ - 'X-Watson-Learning-Opt-Out': - '1', - 'X-Watson-Test': - '1' - }) - - def test_convert_to_html(self): - contract = abspath('resources/contract_A.pdf') - with open(contract, 'rb') as file: - result = self.compare_comply.convert_to_html(file).get_result() - assert result is not None - - def test_classify_elements(self): - contract = abspath('resources/contract_A.pdf') - with open(contract, 'rb') as file: - result = self.compare_comply.classify_elements(file, 'application/pdf').get_result() - assert result is not None - - def test_extract_tables(self): - table = abspath('resources/contract_A.pdf') - with open(table, 'rb') as file: - result = self.compare_comply.extract_tables(file).get_result() - assert result is not None - - def test_compare_documents(self): - with open(os.path.join(os.path.dirname(__file__), '../../resources/contract_A.pdf'), 'rb') as file1, \ - open(os.path.join(os.path.dirname(__file__), '../../resources/contract_B.pdf'), 'rb') as file2: - result = self.compare_comply.compare_documents(file1, file2).get_result() - - assert result is not None - - 
@pytest.mark.skip(reason="Temporarily skip") - def test_feedback(self): - feedback_data = { - 'feedback_type': 'element_classification', - 'document': { - 'hash': '', - 'title': 'doc title' - }, - 'model_id': 'contracts', - 'model_version': '11.00', - 'location': { - 'begin': '214', - 'end': '237' - }, - 'text': '1. IBM will provide a Senior Managing Consultant / expert resource, for up to 80 hours, to assist Florida Power & Light (FPL) with the creation of an IT infrastructure unit cost model for existing infrastructure.', - 'original_labels': { - 'types': [ - { - 'label': { - 'nature': 'Obligation', - 'party': 'IBM' - }, - 'provenance_ids': [ - '85f5981a-ba91-44f5-9efa-0bd22e64b7bc', - 'ce0480a1-5ef1-4c3e-9861-3743b5610795' - ] - }, - { - 'label': { - 'nature': 'End User', - 'party': 'Exclusion' - }, - 'provenance_ids': [ - '85f5981a-ba91-44f5-9efa-0bd22e64b7bc', - 'ce0480a1-5ef1-4c3e-9861-3743b5610795' - ] - } - ], - 'categories': [ - { - 'label': 'Responsibilities', - 'provenance_ids': [] - }, - { - 'label': 'Amendments', - 'provenance_ids': [] - } - ] - }, - 'updated_labels': { - 'types': [ - { - 'label': { - 'nature': 'Obligation', - 'party': 'IBM' - } - }, - { - 'label': { - 'nature': 'Disclaimer', - 'party': 'Buyer' - } - } - ], - 'categories': [ - { - 'label': 'Responsibilities' - }, - { - 'label': 'Audits' - } - ] - } - } - - add_feedback = self.compare_comply.add_feedback( - feedback_data, - 'wonder woman', - 'test commment').get_result() - assert add_feedback is not None - assert add_feedback['feedback_id'] is not None - feedback_id = add_feedback['feedback_id'] - - self.compare_comply.set_default_headers({'x-watson-metadata': 'customer_id=sdk-test-customer-id'}) - get_feedback = self.compare_comply.get_feedback(feedback_id).get_result() - assert get_feedback is not None - - list_feedback = self.compare_comply.list_feedback( - feedback_type='element_classification').get_result() - assert list_feedback is not None - - delete_feedback = self.compare_comply.delete_feedback(feedback_id).get_result() - assert delete_feedback is not None - - @pytest.mark.skip(reason="Temporarily skip") - def test_batches(self): - list_batches = self.compare_comply.list_batches().get_result() - assert list_batches is not None - - with open(os.path.join(os.path.dirname(__file__), '../../resources/cloud-object-storage-credentials-input.json'), 'rb') as input_credentials_file, \ - open(os.path.join(os.path.dirname(__file__), '../../resources/cloud-object-storage-credentials-output.json'), 'rb') as output_credentials_file: - create_batch = self.compare_comply.create_batch( - 'html_conversion', - input_credentials_file, - 'us-south', - 'compare-comply-integration-test-bucket-input', - output_credentials_file, - 'us-south', - 'compare-comply-integration-test-bucket-output').get_result() - - assert create_batch is not None - assert create_batch['batch_id'] is not None - batch_id = create_batch['batch_id'] - - get_batch = self.compare_comply.get_batch(batch_id) - assert get_batch is not None - - update_batch = self.compare_comply.update_batch(batch_id, 'rescan') - assert update_batch is not None diff --git a/test/integration/test_discovery_v1.py b/test/integration/test_discovery_v1.py deleted file mode 100644 index 1f3a19f70..000000000 --- a/test/integration/test_discovery_v1.py +++ /dev/null @@ -1,260 +0,0 @@ -# coding: utf-8 -from unittest import TestCase -import os -import ibm_watson -import random -import pytest - -@pytest.mark.skipif( - os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES') 
-class Discoveryv1(TestCase): - def setUp(self): - self.discovery = ibm_watson.DiscoveryV1( - version='2018-08-01') - self.discovery.set_default_headers({ - 'X-Watson-Learning-Opt-Out': '1', - 'X-Watson-Test': '1' - }) - self.environment_id = 'e15f6424-f887-4f50-b4ea-68267c36fc9c' # This environment is created for integration testing - collections = self.discovery.list_collections(self.environment_id).get_result()['collections'] - self.collection_id = collections[0]['collection_id'] - - for collection in collections: - if collection['name'] == 'DO-NOT-DELETE-JAPANESE-COLLECTION': - self.collection_id_JP = collection['collection_id'] - - def tearDown(self): - collections = self.discovery.list_collections(self.environment_id).get_result()['collections'] - for collection in collections: - if not collection['name'].startswith('DO-NOT-DELETE'): - self.discovery.delete_collection(self.environment_id, collection['collection_id']) - - def test_environments(self): - envs = self.discovery.list_environments().get_result() - assert envs is not None - env = self.discovery.get_environment( - envs['environments'][0]['environment_id']).get_result() - assert env is not None - fields = self.discovery.list_fields(self.environment_id, - self.collection_id).get_result() - assert fields is not None - - def test_configurations(self): - configs = self.discovery.list_configurations(self.environment_id).get_result() - assert configs is not None - - name = 'test' + random.choice('ABCDEFGHIJKLMNOPQ') - new_configuration_id = self.discovery.create_configuration( - self.environment_id, name, - 'creating new config for python sdk').get_result()['configuration_id'] - assert new_configuration_id is not None - self.discovery.get_configuration(self.environment_id, - new_configuration_id).get_result() - - updated_config = self.discovery.update_configuration( - self.environment_id, new_configuration_id, 'lala').get_result() - assert updated_config['name'] == 'lala' - - deleted_config = self.discovery.delete_configuration( - self.environment_id, new_configuration_id).get_result() - assert deleted_config['status'] == 'deleted' - - def test_collections_and_expansions(self): - name = 'Example collection for python' + random.choice('ABCDEFGHIJKLMNOPQ') - new_collection_id = self.discovery.create_collection( - self.environment_id, - name, - description="Integration test for python sdk").get_result()['collection_id'] - assert new_collection_id is not None - - self.discovery.get_collection(self.environment_id, new_collection_id) - updated_collection = self.discovery.update_collection( - self.environment_id, new_collection_id, name, description='Updating description').get_result() - assert updated_collection['description'] == 'Updating description' - - self.discovery.create_expansions(self.environment_id, - new_collection_id, [{ - 'input_terms': ['a'], - 'expanded_terms': ['aa'] - }]).get_result() - expansions = self.discovery.list_expansions(self.environment_id, - new_collection_id).get_result() - assert expansions['expansions'] - self.discovery.delete_expansions(self.environment_id, - new_collection_id) - - deleted_collection = self.discovery.delete_collection( - self.environment_id, new_collection_id).get_result() - assert deleted_collection['status'] == 'deleted' - - def test_documents(self): - with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as fileinfo: - add_doc = self.discovery.add_document( - environment_id=self.environment_id, - collection_id=self.collection_id, - 
file=fileinfo).get_result() - assert add_doc['document_id'] is not None - - doc_status = self.discovery.get_document_status( - self.environment_id, self.collection_id, add_doc['document_id']).get_result() - assert doc_status is not None - - with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as fileinfo: - update_doc = self.discovery.update_document( - self.environment_id, - self.collection_id, - add_doc['document_id'], - file=fileinfo, - filename='newname.html').get_result() - assert update_doc is not None - delete_doc = self.discovery.delete_document( - self.environment_id, self.collection_id, add_doc['document_id']).get_result() - assert delete_doc['status'] == 'deleted' - - def test_queries(self): - query_results = self.discovery.query( - self.environment_id, - self.collection_id, - filter='extracted_metadata.sha1::9181d244*', - return_fields='extracted_metadata.sha1').get_result() - assert query_results is not None - - @pytest.mark.skip(reason="Temporary skipping because update_credentials fails") - def test_credentials(self): - credential_details = { - 'credential_type': 'username_password', - 'url': 'https://login.salesforce.com', - 'username': 'user@email.com', - 'password': 'xxx' - } - credentials = self.discovery.create_credentials(self.environment_id, - 'salesforce', - credential_details).get_result() - assert credentials['credential_id'] is not None - credential_id = credentials['credential_id'] - - get_credentials = self.discovery.get_credentials(self.environment_id, credential_id).get_result() - assert get_credentials['credential_id'] == credential_id - - list_credentials = self.discovery.list_credentials(self.environment_id).get_result() - assert list_credentials is not None - - new_credential_details = { - 'credential_type': 'username_password', - 'url': 'https://logo.salesforce.com', - 'username': 'user@email.com', - 'password': 'xxx' - } - updated_credentials = self.discovery.update_credentials(self.environment_id, credential_id, 'salesforce', new_credential_details).get_result() - assert updated_credentials is not None - - get_credentials = self.discovery.get_credentials(self.environment_id, credentials['credential_id']).get_result() - assert get_credentials['credential_details']['url'] == new_credential_details['url'] - - delete_credentials = self.discovery.delete_credentials(self.environment_id, credential_id).get_result() - assert delete_credentials['credential_id'] is not None - - def test_create_event(self): - # create test document - with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as fileinfo: - add_doc = self.discovery.add_document( - environment_id=self.environment_id, - collection_id=self.collection_id, - file=fileinfo).get_result() - assert add_doc['document_id'] is not None - document_id = add_doc['document_id'] - - # make query to get session token - query = self.discovery.query(self.environment_id, - self.collection_id, - natural_language_query='The content of the first chapter').get_result() - assert query['session_token'] is not None - - # create_event - event_data = { - "environment_id": self.environment_id, - "session_token": query['session_token'], - "collection_id": self.collection_id, - "document_id": document_id, - } - create_event_response = self.discovery.create_event('click', event_data).get_result() - assert create_event_response['type'] == 'click' - - #delete the documment - self.discovery.delete_document(self.environment_id, - self.collection_id, - 
document_id).get_result() - - def test_tokenization_dictionary(self): - result = self.discovery.get_tokenization_dictionary_status( - self.environment_id, - self.collection_id_JP - ).get_result() - assert result['status'] is not None - - def test_feedback(self): - response = self.discovery.get_metrics_event_rate('2018-08-13T14:39:59.309Z', - '2018-08-14T14:39:59.309Z', - 'document').get_result() - assert response['aggregations'] is not None - - response = self.discovery.get_metrics_query('2018-08-13T14:39:59.309Z', - '2018-08-14T14:39:59.309Z', - 'document').get_result() - assert response['aggregations'] is not None - - response = self.discovery.get_metrics_query_event('2018-08-13T14:39:59.309Z', - '2018-08-14T14:39:59.309Z', - 'document').get_result() - assert response['aggregations'] is not None - - response = self.discovery.get_metrics_query_no_results('2018-07-13T14:39:59.309Z', - '2018-08-14T14:39:59.309Z', - 'document').get_result() - assert response['aggregations'] is not None - - response = self.discovery.get_metrics_query_token_event(10).get_result() - assert response['aggregations'] is not None - - response = self.discovery.query_log(count=2).get_result() - assert response is not None - - @pytest.mark.skip(reason="Skip temporarily.") - def test_stopword_operations(self): - with open(os.path.join(os.path.dirname(__file__), '../../resources/stopwords.txt'), 'r') as stopwords_file: - create_stopword_list_result = self.discovery.create_stopword_list( - self.environment_id, - self.collection_id, - stopwords_file - ).get_result() - assert create_stopword_list_result is not None - - delete_stopword_list_result = self.discovery.delete_stopword_list( - self.environment_id, - self.collection_id - ).get_result() - assert delete_stopword_list_result is None - - def test_gateway_configuration(self): - create_gateway_result = self.discovery.create_gateway( - self.environment_id, - 'test-gateway-configuration-python' - ).get_result() - assert create_gateway_result['gateway_id'] is not None - - get_gateway_result = self.discovery.get_gateway( - self.environment_id, - create_gateway_result['gateway_id'] - ).get_result() - assert get_gateway_result is not None - - list_gateways_result = self.discovery.list_gateways( - self.environment_id - ).get_result() - assert list_gateways_result is not None - - delete_gateways_result = self.discovery.delete_gateway( - self.environment_id, - create_gateway_result['gateway_id'] - ).get_result() - assert delete_gateways_result is not None diff --git a/test/integration/test_discovery_v2.py b/test/integration/test_discovery_v2.py new file mode 100644 index 000000000..aa1a402cc --- /dev/null +++ b/test/integration/test_discovery_v2.py @@ -0,0 +1,132 @@ +# coding: utf-8 +from unittest import TestCase +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator, BearerTokenAuthenticator +from ibm_watson.discovery_v2 import CreateEnrichment, EnrichmentOptions +from os.path import abspath +import os +import ibm_watson +import pytest + + +@pytest.mark.skipif(os.getenv('DISCOVERY_V2_APIKEY') is None, + reason='requires DISCOVERY_V2_APIKEY') +class Discoveryv2(TestCase): + discovery = None + project_id = os.getenv('DISCOVERY_V2_PROJECT_ID') # This project is created for integration testing + collection_id = None + collection_name = 'python_test_collection' + + @classmethod + def setup_class(cls): + authenticator = IAMAuthenticator(os.getenv('DISCOVERY_V2_APIKEY')) + cls.discovery = ibm_watson.DiscoveryV2( + version='2020-08-12', + authenticator=authenticator + ) + 
cls.discovery.set_service_url(os.getenv('DISCOVERY_V2_URL')) + cls.discovery.set_default_headers({ + 'X-Watson-Learning-Opt-Out': '1', + 'X-Watson-Test': '1' + }) + + collections = cls.discovery.list_collections( + cls.project_id).get_result()['collections'] + for collection in collections: + if collection['name'] == cls.collection_name: + cls.collection_id = collection['collection_id'] + + if cls.collection_id is None: + print("Creating a new temporary collection") + cls.collection_id = cls.discovery.create_collection( + cls.project_id, + cls.collection_name, + description="Integration test for python sdk").get_result( + )['collection_id'] + + @classmethod + def teardown_class(cls): + collections = cls.discovery.list_collections( + cls.project_id).get_result()['collections'] + for collection in collections: + if collection['name'] == cls.collection_name: + print('Deleting the temporary collection') + cls.discovery.delete_collection(cls.project_id, + cls.collection_id) + break + + def test_projects(self): + projs = self.discovery.list_projects().get_result() + assert projs is not None + proj = self.discovery.get_project( + self.project_id).get_result() + assert proj is not None + + def test_collections(self): + cols = self.discovery.list_collections(self.project_id).get_result() + assert cols is not None + col = self.discovery.get_collection( + self.project_id, + self.collection_id + ).get_result() + assert col is not None + + def test_enrichments(self): + enrs = self.discovery.list_enrichments(self.project_id).get_result() + print(enrs) + assert enrs is not None + + enrichmentOptions = EnrichmentOptions( + languages=["en"], + entity_type="keyword" + ) + enrichment = CreateEnrichment( + name="python test enrichment", + description="test enrichment", + type="dictionary", + options=enrichmentOptions + ) + with open(os.path.join(os.path.dirname(__file__), '../../resources/TestEnrichments.csv'), 'r') as fileinfo: + enr = self.discovery.create_enrichment( + project_id=self.project_id, + enrichment=enrichment._to_dict(), + file=fileinfo + ).get_result() + assert enr is not None + enrichment_id = enr["enrichment_id"] + enrichment = self.discovery.get_enrichment( + self.project_id, + enrichment_id + ).get_result() + assert enrichment is not None + enr = self.discovery.update_enrichment( + project_id=self.project_id, + enrichment_id=enrichment_id, + name="python test enrichment", + description="updated description" + ).get_result() + assert enr is not None + self.discovery.delete_enrichment( + self.project_id, + enrichment_id + ) + + # can only test in CPD + @pytest.mark.skip(reason="can only test in CPD") + def test_analyze(self): + authenticator = BearerTokenAuthenticator('') + discovery_cpd = ibm_watson.DiscoveryV2( + version='2020-08-12', + authenticator=authenticator + ) + discovery_cpd.service_url = "" + discovery_cpd.set_disable_ssl_verification(True) + test_file = abspath('resources/problem.json') + with open(test_file, 'rb') as file: + result = discovery_cpd.analyze_document( + project_id="", + collection_id="", + file=file, + file_content_type="application/json" + ).get_result() + assert result is not None + diff --git a/test/integration/test_examples.py b/test/integration/test_examples.py index 9e5e7d634..aafcbf83a 100644 --- a/test/integration/test_examples.py +++ b/test/integration/test_examples.py @@ -1,6 +1,5 @@ # coding=utf-8 -from __future__ import print_function import re import traceback import pytest @@ -9,11 +8,12 @@ from os.path import join, dirname from glob import glob -# 
tests to exclude -excludes = ['authorization_v1.py', 'discovery_v1.ipynb', '__init__.py', 'microphone-speech-to-text.py'] +# tests to include +includes = ['assistant_v1.py', 'natural_language_understanding_v1.py'] # examples path. /examples -examples_path = join(dirname(__file__), '../', 'examples', '*.py') +examples_path = join(dirname(__file__), '../../', 'examples', '*.py') + @pytest.mark.skipif(os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES') @@ -23,17 +23,13 @@ def test_examples(): for example in examples: name = example.split('/')[-1] - # exclude some tests cases like authorization - if name in excludes: + if name not in includes: continue - # exclude tests if there are no credentials for that service - service_name = name[:-6] if not name.startswith('visual_recognition')\ - else 'watson_vision_combined' + service_name = name[:-6] if service_name not in vcap_services: - print('%s does not have credentials in VCAP_SERVICES', - service_name) + print('%s does not have credentials in VCAP_SERVICES', service_name) continue try: diff --git a/test/integration/test_natural_language_classifier_v1.py b/test/integration/test_natural_language_classifier_v1.py deleted file mode 100644 index f9122be1f..000000000 --- a/test/integration/test_natural_language_classifier_v1.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 -from unittest import TestCase -import os -import ibm_watson -import pytest -import json -import time - -FIVE_SECONDS = 5 - -@pytest.mark.skipif(os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES') -class TestNaturalLanguageClassifierV1(TestCase): - def setUp(self): - self.natural_language_classifier = ibm_watson.NaturalLanguageClassifierV1() - self.natural_language_classifier.set_default_headers({ - 'X-Watson-Learning-Opt-Out': '1', - 'X-Watson-Test': '1' - }) - - # Create a classifier - with open(os.path.join(os.path.dirname(__file__), '../../resources/weather_data_train.csv'), 'rb') as training_data: - metadata = json.dumps({'name': 'my-classifier', 'language': 'en'}) - classifier = self.natural_language_classifier.create_classifier( - metadata=metadata, - training_data=training_data - ).get_result() - self.classifier_id = classifier['classifier_id'] - - def tearDown(self): - self.natural_language_classifier.delete_classifier(self.classifier_id) - - def test_list_classifier(self): - list_classifiers = self.natural_language_classifier.list_classifiers().get_result() - assert list_classifiers is not None - - @pytest.mark.skip(reason="The classifier takes more than a minute") - def test_classify_text(self): - iterations = 0 - while iterations < 15: - status = self.natural_language_classifier.get_classifier(self.classifier_id).get_result() - iterations += 1 - if status['status'] != 'Available': - time.sleep(FIVE_SECONDS) - - if status['status'] != 'Available': - assert False, 'Classifier is not available' - - classes = self.natural_language_classifier.classify(self.classifier_id, 'How hot will it be tomorrow?').get_result() - assert classes is not None - - collection = ['{"text":"How hot will it be today?"}', '{"text":"Is it hot outside?"}'] - classes = self.natural_language_classifier.classify_collection( - self.classifier_id, collection).get_result() - assert classes is not None diff --git a/test/integration/test_natural_language_understanding_v1.py b/test/integration/test_natural_language_understanding_v1.py new file mode 100644 index 000000000..1255b5067 --- /dev/null +++ b/test/integration/test_natural_language_understanding_v1.py @@ -0,0 
+1,31 @@ +# coding: utf-8 +from unittest import TestCase +import os +import ibm_watson +import pytest +import json +import time +from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions +from ibm_cloud_sdk_core.authenticators import IAMAuthenticator + +class TestNaturalLanguageUnderstandingV1(TestCase): + + def setUp(self): + + with open('./auth.json') as f: + data = json.load(f) + nlu_auth = data.get("nlu") + + self.authenticator = IAMAuthenticator(nlu_auth.get("apikey")) + self.natural_language_understanding = ibm_watson.NaturalLanguageUnderstandingV1(version='2018-03-16', authenticator=self.authenticator) + self.natural_language_understanding.set_default_headers({ + 'X-Watson-Learning-Opt-Out': '1', + 'X-Watson-Test': '1' + }) + + def test_analyze(self): + response = self.natural_language_understanding.analyze( + text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! ' + 'Superman fears not Banner, but Wayne.', + features=Features(entities=EntitiesOptions(), keywords=KeywordsOptions())).get_result() + assert response is not None diff --git a/test/integration/test_speech_to_text_v1.py b/test/integration/test_speech_to_text_v1.py index 5dd4e5dfc..4defbea19 100644 --- a/test/integration/test_speech_to_text_v1.py +++ b/test/integration/test_speech_to_text_v1.py @@ -1,4 +1,3 @@ -from __future__ import print_function from unittest import TestCase import os from ibm_watson.websocket import RecognizeCallback, AudioSource @@ -6,8 +5,9 @@ import pytest import threading -@pytest.mark.skipif( - os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES') + +@pytest.mark.skipif(os.getenv('SPEECH_TO_TEXT_APIKEY') is None, + reason='requires SPEECH_TO_TEXT_APIKEY') class TestSpeechToTextV1(TestCase): text_to_speech = None custom_models = None @@ -18,26 +18,26 @@ class TestSpeechToTextV1(TestCase): def setup_class(cls): cls.speech_to_text = ibm_watson.SpeechToTextV1() cls.speech_to_text.set_default_headers({ - 'X-Watson-Learning-Opt-Out': - '1', - 'X-Watson-Test': - '1' + 'X-Watson-Learning-Opt-Out': '1', + 'X-Watson-Test': '1' }) - cls.custom_models = cls.speech_to_text.list_language_models().get_result() + cls.custom_models = cls.speech_to_text.list_language_models( + ).get_result() cls.create_custom_model = cls.speech_to_text.create_language_model( name="integration_test_model", base_model_name="en-US_BroadbandModel").get_result() - cls.customization_id = cls.create_custom_model['customization_id'] + cls.customization_id = cls.create_custom_model.get('customization_id') @classmethod def teardown_class(cls): cls.speech_to_text.delete_language_model( - customization_id=cls.create_custom_model['customization_id']) + customization_id=cls.create_custom_model.get('customization_id')) def test_models(self): output = self.speech_to_text.list_models().get_result() assert output is not None - model = self.speech_to_text.get_model('ko-KR_BroadbandModel').get_result() + model = self.speech_to_text.get_model( + 'ko-KR_BroadbandModel').get_result() assert model is not None try: self.speech_to_text.get_model('bogus') @@ -45,14 +45,18 @@ def test_models(self): assert 'X-global-transaction-id:' in str(e) def test_create_custom_model(self): - current_custom_models = self.speech_to_text.list_language_models().get_result() + current_custom_models = self.speech_to_text.list_language_models( + ).get_result() assert len(current_custom_models['customizations']) - len( - self.custom_models['customizations']) >= 1 + self.custom_models.get('customizations')) >= 1 def 
test_recognize(self): - with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: + with open( + os.path.join(os.path.dirname(__file__), + '../../resources/speech.wav'), 'rb') as audio_file: output = self.speech_to_text.recognize( - audio=audio_file, content_type='audio/l16; rate=44100').get_result() + audio=audio_file, + content_type='audio/l16; rate=44100').get_result() assert output['results'][0]['alternatives'][0][ 'transcript'] == 'thunderstorms could produce large hail isolated tornadoes and heavy rain ' @@ -61,8 +65,9 @@ def test_recognitions(self): assert output is not None def test_custom_corpora(self): - output = self.speech_to_text.list_corpora(self.customization_id).get_result() - assert len(output['corpora']) == 0 # pylint: disable=len-as-condition + output = self.speech_to_text.list_corpora( + self.customization_id).get_result() + assert not output['corpora'] def test_acoustic_model(self): list_models = self.speech_to_text.list_acoustic_models().get_result() @@ -84,7 +89,9 @@ def test_acoustic_model(self): get_acoustic_model['customization_id']).get_result() def test_recognize_using_websocket(self): + class MyRecognizeCallback(RecognizeCallback): + def __init__(self): RecognizeCallback.__init__(self) self.error = None @@ -93,22 +100,87 @@ def __init__(self): def on_error(self, error): self.error = error - def on_transcription(self, transcript): - self.transcript = transcript + def on_data(self, data): + self.data = data test_callback = MyRecognizeCallback() - with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: + with open( + os.path.join(os.path.dirname(__file__), + '../../resources/speech.wav'), 'rb') as audio_file: audio_source = AudioSource(audio_file, False) - t = threading.Thread(target=self.speech_to_text.recognize_using_websocket, args=(audio_source, "audio/l16; rate=44100", test_callback)) + t = threading.Thread( + target=self.speech_to_text.recognize_using_websocket, + args=(audio_source, "audio/l16; rate=44100", test_callback)) t.start() t.join() assert test_callback.error is None - assert test_callback.transcript is not None - assert test_callback.transcript[0]['transcript'] == 'thunderstorms could produce large hail isolated tornadoes and heavy rain ' + assert test_callback.data is not None + assert test_callback.data['results'][0]['alternatives'][0] + ['transcript'] == 'thunderstorms could produce large hail isolated tornadoes and heavy rain ' + def test_on_transcription_interim_results_false(self): + class MyRecognizeCallback(RecognizeCallback): + def __init__(self): + RecognizeCallback.__init__(self) + self.error = None + self.transcript = None + def on_error(self, error): + self.error = error + def on_transcription(self, transcript): + self.transcript = transcript + test_callback = MyRecognizeCallback() + with open(os.path.join(os.path.dirname(__file__), '../../resources/speech_with_pause.wav'), 'rb') as audio_file: + audio_source = AudioSource(audio_file, False) + self.speech_to_text.recognize_using_websocket(audio_source, "audio/wav", test_callback, model="en-US_Telephony", + interim_results=False, low_latency=False) + assert test_callback.error is None + assert test_callback.transcript is not None + assert test_callback.transcript[0][0]['transcript'] in ['isolated tornadoes ', 'isolated tornados '] + assert test_callback.transcript[1][0]['transcript'] == 'and heavy rain ' + def test_on_transcription_interim_results_true(self): + class 
MyRecognizeCallback(RecognizeCallback): + def __init__(self): + RecognizeCallback.__init__(self) + self.error = None + self.transcript = None + def on_error(self, error): + self.error = error + def on_transcription(self, transcript): + self.transcript = transcript + assert transcript[0]['confidence'] is not None + assert transcript[0]['transcript'] is not None + test_callback = MyRecognizeCallback() + with open(os.path.join(os.path.dirname(__file__), '../../resources/speech_with_pause.wav'), 'rb') as audio_file: + audio_source = AudioSource(audio_file, False) + self.speech_to_text.recognize_using_websocket(audio_source, "audio/wav", test_callback, model="en-US_Telephony", + interim_results=True, low_latency=True) + assert test_callback.error is None + assert test_callback.transcript is not None + assert test_callback.transcript[0]['transcript'] == 'and heavy rain ' + def test_on_transcription_interim_results_true_low_latency_false(self): + class MyRecognizeCallback(RecognizeCallback): + def __init__(self): + RecognizeCallback.__init__(self) + self.error = None + self.transcript = None + def on_error(self, error): + self.error = error + def on_transcription(self, transcript): + self.transcript = transcript + assert transcript[0]['confidence'] is not None + assert transcript[0]['transcript'] is not None + test_callback = MyRecognizeCallback() + with open(os.path.join(os.path.dirname(__file__), '../../resources/speech_with_pause.wav'), 'rb') as audio_file: + audio_source = AudioSource(audio_file, False) + self.speech_to_text.recognize_using_websocket(audio_source, "audio/wav", test_callback, model="en-US_Telephony", + interim_results=True, low_latency=False) + assert test_callback.error is None + assert test_callback.transcript is not None + assert test_callback.transcript[0]['transcript'] == 'and heavy rain ' + def test_custom_grammars(self): customization_id = None - for custom_model in self.custom_models['customizations']: + for custom_model in self.custom_models.get('customizations'): if custom_model['name'] == 'integration_test_model_for_grammar': customization_id = custom_model['customization_id'] break @@ -117,37 +189,37 @@ def test_custom_grammars(self): print('Creating a new custom model') create_custom_model_for_grammar = self.speech_to_text.create_language_model( name="integration_test_model_for_grammar", - base_model_name="en-US_BroadbandModel" - ).get_result() - customization_id = create_custom_model_for_grammar['customization_id'] + base_model_name="en-US_BroadbandModel").get_result() + customization_id = create_custom_model_for_grammar[ + 'customization_id'] grammars = self.speech_to_text.list_grammars( - customization_id - ).get_result()['grammars'] + customization_id).get_result()['grammars'] if not grammars: - with open(os.path.join(os.path.dirname(__file__), '../../resources/confirm-grammar.xml'), 'rb') as grammar_file: + with open( + os.path.join(os.path.dirname(__file__), + '../../resources/confirm-grammar.xml'), + 'rb') as grammar_file: add_grammar_result = self.speech_to_text.add_grammar( customization_id, grammar_name='test-add-grammar-python', grammar_file=grammar_file, content_type='application/srgs+xml', - allow_overwrite=True - ).get_result() + allow_overwrite=True).get_result() assert add_grammar_result is not None get_grammar_result = self.speech_to_text.get_grammar( customization_id, - grammar_name='test-add-grammar-python' - ).get_result() + grammar_name='test-add-grammar-python').get_result() assert get_grammar_result is not None else: print('Deleting 
grammar') - delete_grammar_result = self.speech_to_text.delete_grammar( - customization_id, - 'test-add-grammar-python' - ).get_result() - assert delete_grammar_result is not None + try: + self.speech_to_text.delete_grammar( + customization_id, 'test-add-grammar-python').get_result() + except ibm_watson.ApiException as ex: + print('Could not delete grammar: {0}'.format(ex.message)) try: self.speech_to_text.delete_language_model(customization_id) diff --git a/test/integration/test_text_to_speech_v1.py b/test/integration/test_text_to_speech_v1.py index 6da05df7a..407abd68d 100644 --- a/test/integration/test_text_to_speech_v1.py +++ b/test/integration/test_text_to_speech_v1.py @@ -5,8 +5,9 @@ import pytest import os -@pytest.mark.skipif( - os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES') + +@pytest.mark.skipif(os.getenv('TEXT_TO_SPEECH_APIKEY') is None, + reason='requires TEXT_TO_SPEECH_APIKEY') class TestIntegrationTextToSpeechV1(unittest.TestCase): text_to_speech = None original_customizations = None @@ -16,25 +17,25 @@ class TestIntegrationTextToSpeechV1(unittest.TestCase): def setup_class(cls): cls.text_to_speech = ibm_watson.TextToSpeechV1() cls.text_to_speech.set_default_headers({ - 'X-Watson-Learning-Opt-Out': - '1', - 'X-Watson-Test': - '1' + 'X-Watson-Learning-Opt-Out': '1', + 'X-Watson-Test': '1' }) - cls.original_customizations = cls.text_to_speech.list_voice_models().get_result() - cls.created_customization = cls.text_to_speech.create_voice_model( + cls.original_customizations = cls.text_to_speech.list_custom_models( + ).get_result() + cls.created_customization = cls.text_to_speech.create_custom_model( name="test_integration_customization", description="customization for tests").get_result() @classmethod def teardown_class(cls): - custid = cls.created_customization['customization_id'] - cls.text_to_speech.delete_voice_model(customization_id=custid) + custid = cls.created_customization.get('customization_id') + cls.text_to_speech.delete_custom_model(customization_id=custid) def test_voices(self): output = self.text_to_speech.list_voices().get_result() assert output['voices'] is not None - voice = self.text_to_speech.get_voice(output['voices'][0]['name']).get_result() + voice = self.text_to_speech.get_voice( + output['voices'][0]['name']).get_result() assert voice is not None def test_speak(self): @@ -49,28 +50,62 @@ def test_pronunciation(self): assert output['pronunciation'] is not None def test_customizations(self): - old_length = len(self.original_customizations['customizations']) - new_length = len( - self.text_to_speech.list_voice_models().get_result()['customizations']) + old_length = len(self.original_customizations.get('customizations')) + new_length = len(self.text_to_speech.list_custom_models().get_result() + ['customizations']) assert new_length - old_length >= 1 def test_custom_words(self): - customization_id = self.created_customization['customization_id'] - words = self.text_to_speech.list_words(customization_id).get_result()['words'] - assert len(words) == 0 # pylint: disable=len-as-condition - self.text_to_speech.add_word( - customization_id, word="ACLs", translation="ackles") + customization_id = self.created_customization.get('customization_id') + words = self.text_to_speech.list_words( + customization_id).get_result()['words'] + assert not words + self.text_to_speech.add_word(customization_id, + word="ACLs", + translation="ackles") words = [{"word": "MACLs", "translation": "mackles"}] self.text_to_speech.add_words(customization_id, words) 
self.text_to_speech.delete_word(customization_id, 'ACLs') - word = self.text_to_speech.get_word(customization_id, 'MACLs').get_result() + word = self.text_to_speech.get_word(customization_id, + 'MACLs').get_result() assert word['translation'] == 'mackles' + def test_custom_prompts(self): + customization_id = self.created_customization.get('customization_id') + prompt_id = "Hello" + metadata = { + "prompt_text": "Hello how are you today?" + } + + with open("resources/tts_audio.wav", "rb") as audio_file: + self.text_to_speech.add_custom_prompt( + customization_id, prompt_id, metadata, audio_file + ).get_result() + prompts = self.text_to_speech.list_custom_prompts(customization_id).get_result() + assert len(prompts) > 0 + prompt = self.text_to_speech.get_custom_prompt(customization_id, prompt_id).get_result() + assert prompt["prompt_id"] == prompt_id + self.text_to_speech.delete_custom_prompt(customization_id, prompt_id) + + def test_speaker_models(self): + speaker_name = "Angelo" + + with open("resources/tts_audio.wav", "rb") as audio_file: + speaker_id = self.text_to_speech.create_speaker_model( + speaker_name, audio_file + ).get_result()["speaker_id"] + speaker_models = self.text_to_speech.list_speaker_models().get_result() + assert len(speaker_models) > 0 + speaker_model = self.text_to_speech.get_speaker_model(speaker_id).get_result() + self.text_to_speech.delete_speaker_model(speaker_id) + def test_synthesize_using_websocket(self): file = 'tongue_twister.wav' + class MySynthesizeCallback(SynthesizeCallback): + def __init__(self): SynthesizeCallback.__init__(self) self.fd = None @@ -89,11 +124,46 @@ def on_close(self): self.fd.close() test_callback = MySynthesizeCallback() - self.text_to_speech.synthesize_using_websocket('She sells seashells by the seashore', - test_callback, - accept='audio/wav', - voice='en-GB_KateVoice' - ) + self.text_to_speech.synthesize_using_websocket( + 'She sells seashells by the seashore', + test_callback, + accept='audio/wav', + voice='en-GB_KateVoice') assert test_callback.error is None + assert test_callback.fd is not None + assert os.stat(file).st_size > 0 + os.remove(file) + + # This test will only be meaningful so long as en-AU_CraigVoice is a Neural type voice model + # Check this URL for all Neural type voice models: https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices#languageVoices + def test_synthesize_using_websocket_neural(self): + file = 'tongue_twister.wav' + + class MySynthesizeCallback(SynthesizeCallback): + + def __init__(self): + SynthesizeCallback.__init__(self) + self.fd = None + self.error = None + + def on_connected(self): + self.fd = open(file, 'ab') + + def on_error(self, error): + self.error = error + + def on_audio_stream(self, audio_stream): + self.fd.write(audio_stream) + + def on_close(self): + self.fd.close() + + test_callback = MySynthesizeCallback() + self.text_to_speech.synthesize_using_websocket( + 'She sells seashells by the seashore', + test_callback, + accept='audio/wav', + voice='en-GB_JamesV3Voice') assert test_callback.error is None + assert test_callback.fd is not None + assert os.stat(file).st_size > 0 diff --git a/test/integration/test_visual_recognition.py b/test/integration/test_visual_recognition.py deleted file mode 100644 index 152ec9d24..000000000 --- a/test/integration/test_visual_recognition.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding: utf-8 -import pytest -import ibm_watson -import os -from os.path import abspath -from unittest import TestCase - -@pytest.mark.skipif( -
os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES') -class IntegrationTestVisualRecognitionV3(TestCase): - visual_recognition = None - classifier_id = None - - @classmethod - def setup_class(cls): - cls.visual_recognition = ibm_watson.VisualRecognitionV3('2018-03-19') - cls.visual_recognition.set_default_headers({ - 'X-Watson-Learning-Opt-Out': - '1', - 'X-Watson-Test': - '1' - }) - cls.classifier_id = 'sdkxtestxclassifierxdoxnotxdel_1089651138' - - def test_classify(self): - dog_path = abspath('resources/dog.jpg') - with open(dog_path, 'rb') as image_file: - dog_results = self.visual_recognition.classify( - images_file=image_file, - threshold='0.1', - classifier_ids=['default']).get_result() - assert dog_results is not None - - def test_detect_faces(self): - output = self.visual_recognition.detect_faces( - url='https://www.ibm.com/ibm/ginni/images/ginni_bio_780x981_v4_03162016.jpg').get_result() - assert output is not None - - @pytest.mark.skip(reason="Time consuming") - def test_custom_classifier(self): - with open(abspath('resources/cars.zip'), 'rb') as cars, \ - open(abspath('resources/trucks.zip'), 'rb') as trucks: - classifier = self.visual_recognition.create_classifier( - 'CarsVsTrucks', - positive_examples={'cars': cars}, - negative_examples=trucks, - ).get_result() - - assert classifier is not None - - classifier_id = classifier['classifier_id'] - output = self.visual_recognition.get_classifier(classifier_id).get_result() - assert output is not None - - output = self.visual_recognition.delete_classifier(classifier_id).get_result() - - def test_core_ml_model(self): - core_ml_model = self.visual_recognition.get_core_ml_model(self.classifier_id).get_result() - assert core_ml_model.ok diff --git a/test/unit/__init__.py b/test/unit/__init__.py index 161119efe..949039f3b 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -1,5 +1,4 @@ # coding: utf-8 -from __future__ import print_function from dotenv import load_dotenv, find_dotenv # load the .env file containing your environment variables for the required diff --git a/test/unit/test_assistant_v1.py b/test/unit/test_assistant_v1.py index 757349940..82781955f 100644 --- a/test/unit/test_assistant_v1.py +++ b/test/unit/test_assistant_v1.py @@ -1,1520 +1,13086 @@ -# coding: utf-8 +# -*- coding: utf-8 -*- +# (C) Copyright IBM Corp. 2019, 2024. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
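+# The regenerated test_assistant_v1.py below is large, but every test follows the same pattern: register a canned JSON payload with the `responses` library, call the corresponding AssistantV1 method against a NoAuthAuthenticator-backed client, then assert on the captured request. A minimal sketch of that pattern (the function name and the simplified mock body are illustrative, not part of this diff): +# import json +# import responses +# from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator +# from ibm_watson import AssistantV1 +# +# @responses.activate +# def sketch_message_round_trip(): +# service = AssistantV1(version='testString', authenticator=NoAuthAuthenticator()) +# service.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com') +# responses.add( +# responses.POST, +# 'https://api.us-south.assistant.watson.cloud.ibm.com/v1/workspaces/testString/message', +# body=json.dumps({'output': {'generic': []}}), +# content_type='application/json', +# status=200) +# result = service.message('testString', input={'text': 'hello'}).get_result() +# assert len(responses.calls) == 1 +# assert result['output'] == {'generic': []}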
+ +""" +Unit Tests for AssistantV1 +""" + +from datetime import datetime, timezone +from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator +from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime +import inspect import json -import datetime -from dateutil.tz import tzutc +import pytest +import re +import requests import responses -import ibm_watson -from ibm_watson import ApiException -from ibm_watson.assistant_v1 import Context, Counterexample, \ - CounterexampleCollection, Entity, EntityCollection, Example, \ - ExampleCollection, MessageInput, Intent, IntentCollection, Synonym, \ - SynonymCollection, Value, ValueCollection, Workspace, WorkspaceCollection - -platform_url = 'https://gateway.watsonplatform.net' -service_path = '/assistant/api' -base_url = '{0}{1}'.format(platform_url, service_path) - -######################### -# counterexamples -######################### - - -@responses.activate -def test_create_counterexample(): - endpoint = '/v1/workspaces/{0}/counterexamples'.format('boguswid') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "text": "I want financial advice today.", - "created": "2016-07-11T16:39:01.774Z", - "updated": "2015-12-07T18:53:59.153Z" - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=201, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - counterexample = service.create_counterexample( - workspace_id='boguswid', text='I want financial advice today.').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert counterexample == response - # Verify that response can be converted to a Counterexample - Counterexample._from_dict(counterexample) - -@responses.activate -def test_rate_limit_exceeded(): - endpoint = '/v1/workspaces/{0}/counterexamples'.format('boguswid') - url = '{0}{1}'.format(base_url, endpoint) - error_code = 429 - error_msg = 'Rate limit exceeded' - responses.add( - responses.POST, - url, - body='Rate limit exceeded', - status=429, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - try: - service.create_counterexample( - workspace_id='boguswid', text='I want financial advice today.') - except ApiException as ex: - assert len(responses.calls) == 1 - assert isinstance(ex, ApiException) - assert error_code == ex.code - assert error_msg in str(ex) - -@responses.activate -def test_unknown_error(): - endpoint = '/v1/workspaces/{0}/counterexamples'.format('boguswid') - url = '{0}{1}'.format(base_url, endpoint) - error_msg = 'Unknown error' - responses.add( - responses.POST, - url, - status=407, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - try: - service.create_counterexample( - workspace_id='boguswid', text='I want financial advice today.') - except ApiException as ex: - assert len(responses.calls) == 1 - assert error_msg in str(ex) - -@responses.activate -def test_delete_counterexample(): - endpoint = '/v1/workspaces/{0}/counterexamples/{1}'.format( - 'boguswid', 'I%20want%20financial%20advice%20today') - url = '{0}{1}'.format(base_url, endpoint) - response = None - responses.add( - responses.DELETE, - url, - body=response, - status=204, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', 
password='password', version='2017-02-03') - counterexample = service.delete_counterexample( - workspace_id='boguswid', text='I want financial advice today').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert counterexample is None - - -@responses.activate -def test_get_counterexample(): - endpoint = '/v1/workspaces/{0}/counterexamples/{1}'.format( - 'boguswid', 'What%20are%20you%20wearing%3F') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "text": "What are you wearing?", - "created": "2016-07-11T23:53:59.153Z", - "updated": "2016-12-07T18:53:59.153Z" - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - counterexample = service.get_counterexample( - workspace_id='boguswid', text='What are you wearing?').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert counterexample == response - # Verify that response can be converted to a Counterexample - Counterexample._from_dict(counterexample) - -@responses.activate -def test_list_counterexamples(): - endpoint = '/v1/workspaces/{0}/counterexamples'.format('boguswid') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "counterexamples": [{ - "text": "I want financial advice today.", - "created": "2016-07-11T16:39:01.774Z", - "updated": "2015-12-07T18:53:59.153Z" - }, { - "text": "What are you wearing today", - "created": "2016-07-11T16:39:01.774Z", - "updated": "2015-12-07T18:53:59.153Z" - }], - "pagination": { - "refresh_url": - "/v1/workspaces/pizza_app-e0f3/counterexamples?version=2017-12-18&page_limit=2", - "next_url": - "/v1/workspaces/pizza_app-e0f3/counterexamples?cursor=base64=&version=2017-12-18&page_limit=2" - } - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - counterexamples = service.list_counterexamples(workspace_id='boguswid').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert counterexamples == response - # Verify that response can be converted to a CounterexampleCollection - CounterexampleCollection._from_dict(counterexamples) - -@responses.activate -def test_update_counterexample(): - endpoint = '/v1/workspaces/{0}/counterexamples/{1}'.format( - 'boguswid', 'What%20are%20you%20wearing%3F') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "text": "What are you wearing?", - "created": "2016-07-11T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z" - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - counterexample = service.update_counterexample( - workspace_id='boguswid', - text='What are you wearing?', - new_text='What are you wearing?').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert counterexample == response - # Verify that response can be converted to a Counterexample - Counterexample._from_dict(counterexample) - -######################### -# entities -######################### - - -@responses.activate -def 
test_create_entity(): - endpoint = '/v1/workspaces/{0}/entities'.format('boguswid') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "entity": "pizza_toppings", - "description": "Tasty pizza toppings", - "created": "2015-12-06T04:32:20.000Z", - "updated": "2015-12-07T18:53:59.153Z", - "metadata": { - "property": "value" +import urllib +from ibm_watson.assistant_v1 import * + +version = 'testString' + +_service = AssistantV1( + authenticator=NoAuthAuthenticator(), + version=version, +) + +_base_url = 'https://api.us-south.assistant.watson.cloud.ibm.com' +_service.set_service_url(_base_url) + + +def preprocess_url(operation_path: str): + """ + Returns the request url associated with the specified operation path. + This will be base_url concatenated with a quoted version of operation_path. + The returned request URL is used to register the mock response so it needs + to match the request URL that is formed by the requests library. + """ + + # Form the request URL from the base URL and operation path. + request_url = _base_url + operation_path + + # If the request url does NOT end with a /, then just return it as-is. + # Otherwise, return a regular expression that matches one or more trailing /. + if not request_url.endswith('/'): + return request_url + return re.compile(request_url.rstrip('/') + '/+') + + +############################################################################## +# Start of Service: Message +############################################################################## +# region + + +class TestMessage: + """ + Test Class for message + """ + + @responses.activate + def test_message_all_params(self): + """ + message() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/message') + mock_response = '{"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": 
"result_variable", "credentials": "credentials"}], "user_id": "user_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a MessageInput model + message_input_model = {} + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + # Construct a dict representation of a RuntimeIntent model + runtime_intent_model = {} + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + # Construct a dict representation of a CaptureGroup model + capture_group_model = {} + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + # Construct a dict representation of a RuntimeEntityInterpretation model + runtime_entity_interpretation_model = {} + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + # Construct a dict representation of a RuntimeEntityAlternative model + runtime_entity_alternative_model = {} + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + # Construct a dict representation of a RuntimeEntityRole model + runtime_entity_role_model = {} + runtime_entity_role_model['type'] = 'date_from' + + # Construct a dict representation of a RuntimeEntity model + runtime_entity_model = {} + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = 
runtime_entity_role_model + + # Construct a dict representation of a MessageContextMetadata model + message_context_metadata_model = {} + message_context_metadata_model['deployment'] = 'testString' + message_context_metadata_model['user_id'] = 'testString' + + # Construct a dict representation of a Context model + context_model = {} + context_model['conversation_id'] = 'testString' + context_model['system'] = {'anyKey': 'anyValue'} + context_model['metadata'] = message_context_metadata_model + context_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeVisitedDetails model + dialog_node_visited_details_model = {} + dialog_node_visited_details_model['dialog_node'] = 'testString' + dialog_node_visited_details_model['title'] = 'testString' + dialog_node_visited_details_model['conditions'] = 'testString' + + # Construct a dict representation of a LogMessageSource model + log_message_source_model = {} + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + # Construct a dict representation of a LogMessage model + log_message_model = {} + log_message_model['level'] = 'info' + log_message_model['msg'] = 'testString' + log_message_model['code'] = 'testString' + log_message_model['source'] = log_message_source_model + + # Construct a dict representation of a ResponseGenericChannel model + response_generic_channel_model = {} + response_generic_channel_model['channel'] = 'chat' + + # Construct a dict representation of a RuntimeResponseGenericRuntimeResponseTypeText model + runtime_response_generic_model = {} + runtime_response_generic_model['response_type'] = 'text' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['channels'] = [response_generic_channel_model] + + # Construct a dict representation of a OutputData model + output_data_model = {} + output_data_model['nodes_visited'] = ['testString'] + output_data_model['nodes_visited_details'] = [dialog_node_visited_details_model] + output_data_model['log_messages'] = [log_message_model] + output_data_model['generic'] = [runtime_response_generic_model] + output_data_model['foo'] = 'testString' + + # Set up parameter values + workspace_id = 'testString' + input = message_input_model + intents = [runtime_intent_model] + entities = [runtime_entity_model] + alternate_intents = False + context = context_model + output = output_data_model + user_id = 'testString' + nodes_visited_details = False + + # Invoke method + response = _service.message( + workspace_id, + input=input, + intents=intents, + entities=entities, + alternate_intents=alternate_intents, + context=context, + output=output, + user_id=user_id, + nodes_visited_details=nodes_visited_details, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'nodes_visited_details={}'.format('true' if nodes_visited_details else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['input'] == message_input_model + assert req_body['intents'] == [runtime_intent_model] + assert req_body['entities'] == [runtime_entity_model] + assert req_body['alternate_intents'] == False + assert req_body['context'] == context_model + assert req_body['output'] == output_data_model + assert 
req_body['user_id'] == 'testString' + + def test_message_all_params_with_retries(self): + # Enable retries and run test_message_all_params. + _service.enable_retries() + self.test_message_all_params() + + # Disable retries and run test_message_all_params. + _service.disable_retries() + self.test_message_all_params() + + @responses.activate + def test_message_required_params(self): + """ + test_message_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/message') + mock_response = '{"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.message( + workspace_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_message_required_params_with_retries(self): + # Enable retries and run test_message_required_params. + _service.enable_retries() + self.test_message_required_params() + + # Disable retries and run test_message_required_params. 
+ _service.disable_retries() + self.test_message_required_params() + + @responses.activate + def test_message_value_error(self): + """ + test_message_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/message') + mock_response = '{"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, } - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=201, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - entity = service.create_entity( - workspace_id='boguswid', - entity='pizza_toppings', - description='Tasty pizza toppings', - metadata={"property": "value"}, - values=None, - fuzzy_match=None).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert entity == response - # Verify that response can be converted to an Entity - Entity._from_dict(entity) - -@responses.activate -def test_delete_entity(): - endpoint = '/v1/workspaces/{0}/entities/{1}'.format('boguswid', 'pizza_toppings') - url = '{0}{1}'.format(base_url, endpoint) - response = "" - responses.add( - responses.DELETE, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - entity = service.delete_entity(workspace_id='boguswid', 
entity='pizza_toppings').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert entity == "" - - -@responses.activate -def test_get_entity(): - endpoint = '/v1/workspaces/{0}/entities/{1}'.format('boguswid', 'pizza_toppings') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "entity": "pizza_toppings", - "description": "Tasty pizza toppings", - "created": "2015-12-06T04:32:20.000Z", - "updated": "2015-12-07T18:53:59.153Z", - "metadata": { - "property": "value" + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.message(**req_copy) + + def test_message_value_error_with_retries(self): + # Enable retries and run test_message_value_error. + _service.enable_retries() + self.test_message_value_error() + + # Disable retries and run test_message_value_error. + _service.disable_retries() + self.test_message_value_error() + + +# endregion +############################################################################## +# End of Service: Message +############################################################################## + +############################################################################## +# Start of Service: BulkClassify +############################################################################## +# region + + +class TestBulkClassify: + """ + Test Class for bulk_classify + """ + + @responses.activate + def test_bulk_classify_all_params(self): + """ + bulk_classify() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/bulk_classify') + mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a BulkClassifyUtterance model + bulk_classify_utterance_model = {} + bulk_classify_utterance_model['text'] = 'testString' + + # Set up parameter values + workspace_id = 'testString' + input = [bulk_classify_utterance_model] + + # Invoke method + response = _service.bulk_classify( + workspace_id, + input=input, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['input'] == [bulk_classify_utterance_model] + + def test_bulk_classify_all_params_with_retries(self): + # Enable retries and run 
test_bulk_classify_all_params. + _service.enable_retries() + self.test_bulk_classify_all_params() + + # Disable retries and run test_bulk_classify_all_params. + _service.disable_retries() + self.test_bulk_classify_all_params() + + @responses.activate + def test_bulk_classify_required_params(self): + """ + test_bulk_classify_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/bulk_classify') + mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.bulk_classify( + workspace_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_bulk_classify_required_params_with_retries(self): + # Enable retries and run test_bulk_classify_required_params. + _service.enable_retries() + self.test_bulk_classify_required_params() + + # Disable retries and run test_bulk_classify_required_params. 
+ _service.disable_retries() + self.test_bulk_classify_required_params() + + @responses.activate + def test_bulk_classify_value_error(self): + """ + test_bulk_classify_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/bulk_classify') + mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, } - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - entity = service.get_entity(workspace_id='boguswid', entity='pizza_toppings', export=True).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert entity == response - # Verify that response can be converted to an Entity - Entity._from_dict(entity) - - -@responses.activate -def test_list_entities(): - endpoint = '/v1/workspaces/{0}/entities'.format('boguswid') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "entities": [{ - "entity": "pizza_toppings", - "description": "Tasty pizza toppings", - "created": "2015-12-06T04:32:20.000Z", - "updated": "2015-12-07T18:53:59.153Z", - "metadata": { - "property": "value" - } - }], - "pagination": { - "refresh_url": - "/v1/workspaces/pizza_app-e0f3/entities?version=2017-12-18&filter=name:pizza&include_count=true&page_limit=1", - "next_url": - "/v1/workspaces/pizza_app-e0f3/entities?cursor=base64=&version=2017-12-18&filter=name:pizza&page_limit=1", - "total": - 1, - "matched": - 1 + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.bulk_classify(**req_copy) + + def test_bulk_classify_value_error_with_retries(self): + # Enable retries and run test_bulk_classify_value_error. + _service.enable_retries() + self.test_bulk_classify_value_error() + + # Disable retries and run test_bulk_classify_value_error. 
+ _service.disable_retries() + self.test_bulk_classify_value_error() + + +# endregion +############################################################################## +# End of Service: BulkClassify +############################################################################## + +############################################################################## +# Start of Service: Workspaces +############################################################################## +# region + + +class TestListWorkspaces: + """ + Test Class for list_workspaces + """ + + @responses.activate + def test_list_workspaces_all_params(self): + """ + list_workspaces() + """ + # Set up mock + url = preprocess_url('/v1/workspaces') + mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], 
"created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + page_limit = 100 + include_count = False + sort = 'name' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_workspaces( + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_workspaces_all_params_with_retries(self): + # Enable retries and run test_list_workspaces_all_params. + _service.enable_retries() + self.test_list_workspaces_all_params() + + # Disable retries and run test_list_workspaces_all_params. + _service.disable_retries() + self.test_list_workspaces_all_params() + + @responses.activate + def test_list_workspaces_required_params(self): + """ + test_list_workspaces_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces') + mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": 
"auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_workspaces() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_workspaces_required_params_with_retries(self): + # Enable retries and run test_list_workspaces_required_params. + _service.enable_retries() + self.test_list_workspaces_required_params() + + # Disable retries and run test_list_workspaces_required_params. 
+ _service.disable_retries() + self.test_list_workspaces_required_params() + + @responses.activate + def test_list_workspaces_value_error(self): + """ + test_list_workspaces_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces') + mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Pass in all but one required param and check for a 
ValueError + req_param_dict = { } - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - entities = service.list_entities( - workspace_id='boguswid', - export=True).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert entities == response - # Verify that response can be converted to an EntityCollection - EntityCollection._from_dict(entities) - - -@responses.activate -def test_update_entity(): - endpoint = '/v1/workspaces/{0}/entities/{1}'.format('boguswid', 'pizza_toppings') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "entity": "pizza_toppings", - "description": "Tasty pizza toppings", - "created": "2015-12-06T04:32:20.000Z", - "updated": "2015-12-07T18:53:59.153Z", - "metadata": { - "property": "value" + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_workspaces(**req_copy) + + def test_list_workspaces_value_error_with_retries(self): + # Enable retries and run test_list_workspaces_value_error. + _service.enable_retries() + self.test_list_workspaces_value_error() + + # Disable retries and run test_list_workspaces_value_error. + _service.disable_retries() + self.test_list_workspaces_value_error() + + +class TestCreateWorkspace: + """ + Test Class for create_workspace + """ + + @responses.activate + def test_create_workspace_all_params(self): + """ + create_workspace() + """ + # Set up mock + url = preprocess_url('/v1/workspaces') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, 
"human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a DialogNodeOutputTextValuesElement model + dialog_node_output_text_values_element_model = {} + dialog_node_output_text_values_element_model['text'] = 'testString' + + # Construct a dict representation of a ResponseGenericChannel model + response_generic_channel_model = {} + response_generic_channel_model['channel'] = 'chat' + + # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model + dialog_node_output_generic_model = {} + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + # Construct a dict representation of a DialogNodeOutputModifiers model + dialog_node_output_modifiers_model = {} + dialog_node_output_modifiers_model['overwrite'] = True + + # Construct a dict representation of a DialogNodeOutput model + dialog_node_output_model = {} + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeContext model + dialog_node_context_model = {} + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeNextStep model + dialog_node_next_step_model = {} + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + # Construct a dict representation of a DialogNodeAction model + dialog_node_action_model = {} + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + 
dialog_node_action_model['credentials'] = 'testString'
+
+        # Construct a dict representation of a DialogNode model
+        dialog_node_model = {}
+        dialog_node_model['dialog_node'] = 'testString'
+        dialog_node_model['description'] = 'testString'
+        dialog_node_model['conditions'] = 'testString'
+        dialog_node_model['parent'] = 'testString'
+        dialog_node_model['previous_sibling'] = 'testString'
+        dialog_node_model['output'] = dialog_node_output_model
+        dialog_node_model['context'] = dialog_node_context_model
+        dialog_node_model['metadata'] = {'anyKey': 'anyValue'}
+        dialog_node_model['next_step'] = dialog_node_next_step_model
+        dialog_node_model['title'] = 'testString'
+        dialog_node_model['type'] = 'standard'
+        dialog_node_model['event_name'] = 'focus'
+        dialog_node_model['variable'] = 'testString'
+        dialog_node_model['actions'] = [dialog_node_action_model]
+        dialog_node_model['digress_in'] = 'not_available'
+        dialog_node_model['digress_out'] = 'allow_returning'
+        dialog_node_model['digress_out_slots'] = 'not_allowed'
+        dialog_node_model['user_label'] = 'testString'
+        dialog_node_model['disambiguation_opt_out'] = False
+
+        # Construct a dict representation of a Counterexample model
+        counterexample_model = {}
+        counterexample_model['text'] = 'testString'
+
+        # Construct a dict representation of a WorkspaceSystemSettingsTooling model
+        workspace_system_settings_tooling_model = {}
+        workspace_system_settings_tooling_model['store_generic_responses'] = True
+
+        # Construct a dict representation of a WorkspaceSystemSettingsDisambiguation model
+        workspace_system_settings_disambiguation_model = {}
+        workspace_system_settings_disambiguation_model['prompt'] = 'testString'
+        workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString'
+        workspace_system_settings_disambiguation_model['enabled'] = False
+        workspace_system_settings_disambiguation_model['sensitivity'] = 'auto'
+        workspace_system_settings_disambiguation_model['randomize'] = True
+        workspace_system_settings_disambiguation_model['max_suggestions'] = 1
+        workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString'
+
+        # Construct a dict representation of a WorkspaceSystemSettingsSystemEntities model
+        workspace_system_settings_system_entities_model = {}
+        workspace_system_settings_system_entities_model['enabled'] = False
+
+        # Construct a dict representation of a WorkspaceSystemSettingsOffTopic model
+        workspace_system_settings_off_topic_model = {}
+        workspace_system_settings_off_topic_model['enabled'] = False
+
+        # Construct a dict representation of a WorkspaceSystemSettingsNlp model
+        workspace_system_settings_nlp_model = {}
+        workspace_system_settings_nlp_model['model'] = 'testString'
+
+        # Construct a dict representation of a WorkspaceSystemSettings model
+        workspace_system_settings_model = {}
+        workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model
+        workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model
+        workspace_system_settings_model['human_agent_assist'] = {'anyKey': 'anyValue'}
+        workspace_system_settings_model['spelling_suggestions'] = False
+        workspace_system_settings_model['spelling_auto_correct'] = False
+        workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model
+        workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model
+        workspace_system_settings_model['nlp'] = workspace_system_settings_nlp_model
+        workspace_system_settings_model['foo'] = 'testString'
+
+        # Construct a dict representation of a WebhookHeader model
+        webhook_header_model = {}
+        webhook_header_model['name'] = 'testString'
+        webhook_header_model['value'] = 'testString'
+
+        # Construct a dict representation of a Webhook model
+        webhook_model = {}
+        webhook_model['url'] = 'testString'
+        webhook_model['name'] = 'testString'
+        webhook_model['headers'] = [webhook_header_model]
+
+        # Construct a dict representation of a Mention model
+        mention_model = {}
+        mention_model['entity'] = 'testString'
+        mention_model['location'] = [38]
+
+        # Construct a dict representation of a Example model
+        example_model = {}
+        example_model['text'] = 'testString'
+        example_model['mentions'] = [mention_model]
+
+        # Construct a dict representation of a CreateIntent model
+        create_intent_model = {}
+        create_intent_model['intent'] = 'testString'
+        create_intent_model['description'] = 'testString'
+        create_intent_model['examples'] = [example_model]
+
+        # Construct a dict representation of a CreateValue model
+        create_value_model = {}
+        create_value_model['value'] = 'testString'
+        create_value_model['metadata'] = {'anyKey': 'anyValue'}
+        create_value_model['type'] = 'synonyms'
+        create_value_model['synonyms'] = ['testString']
+        create_value_model['patterns'] = ['testString']
+
+        # Construct a dict representation of a CreateEntity model
+        create_entity_model = {}
+        create_entity_model['entity'] = 'testString'
+        create_entity_model['description'] = 'testString'
+        create_entity_model['metadata'] = {'anyKey': 'anyValue'}
+        create_entity_model['fuzzy_match'] = True
+        create_entity_model['values'] = [create_value_model]
+
+        # Set up parameter values
+        name = 'testString'
+        description = 'testString'
+        language = 'testString'
+        dialog_nodes = [dialog_node_model]
+        counterexamples = [counterexample_model]
+        metadata = {'anyKey': 'anyValue'}
+        learning_opt_out = False
+        system_settings = workspace_system_settings_model
+        webhooks = [webhook_model]
+        intents = [create_intent_model]
+        entities = [create_entity_model]
+        include_audit = False
+
+        # Invoke method
+        response = _service.create_workspace(
+            name=name,
+            description=description,
+            language=language,
+            dialog_nodes=dialog_nodes,
+            counterexamples=counterexamples,
+            metadata=metadata,
+            learning_opt_out=learning_opt_out,
+            system_settings=system_settings,
+            webhooks=webhooks,
+            intents=intents,
+            entities=entities,
+            include_audit=include_audit,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['name'] == 'testString'
+        assert req_body['description'] == 'testString'
+        assert req_body['language'] == 'testString'
+        assert req_body['dialog_nodes'] == [dialog_node_model]
+        assert req_body['counterexamples'] == [counterexample_model]
+        assert req_body['metadata'] == {'anyKey': 'anyValue'}
+        assert req_body['learning_opt_out'] == False
+        assert req_body['system_settings'] == workspace_system_settings_model
+        assert req_body['webhooks'] == [webhook_model]
+        assert req_body['intents'] == [create_intent_model]
+        assert req_body['entities'] == [create_entity_model]
+
+    def 
test_create_workspace_all_params_with_retries(self): + # Enable retries and run test_create_workspace_all_params. + _service.enable_retries() + self.test_create_workspace_all_params() + + # Disable retries and run test_create_workspace_all_params. + _service.disable_retries() + self.test_create_workspace_all_params() + + @responses.activate + def test_create_workspace_required_params(self): + """ + test_create_workspace_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + 
status=201, + ) + + # Invoke method + response = _service.create_workspace() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_create_workspace_required_params_with_retries(self): + # Enable retries and run test_create_workspace_required_params. + _service.enable_retries() + self.test_create_workspace_required_params() + + # Disable retries and run test_create_workspace_required_params. + _service.disable_retries() + self.test_create_workspace_required_params() + + @responses.activate + def test_create_workspace_value_error(self): + """ + test_create_workspace_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": 
"2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Pass in all but one required param and check for a ValueError + req_param_dict = { } - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - entity = service.update_entity( - workspace_id='boguswid', - entity='pizza_toppings', - new_entity='pizza_toppings').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert entity == response - # Verify that response can be converted to an Entity - Entity._from_dict(entity) - - -######################### -# examples -######################### - - -@responses.activate -def test_create_example(): - endpoint = '/v1/workspaces/{0}/intents/{1}/examples'.format( - 'boguswid', 'pizza_order') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "text": "Gimme a pizza with pepperoni", - "created": "2016-07-11T16:39:01.774Z", - "updated": "2015-12-07T18:53:59.153Z" - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=201, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - example = service.create_example( - workspace_id='boguswid', - intent='pizza_order', - text='Gimme a pizza with pepperoni', - mentions=[{'entity': 'xxx', 'location': [0, 1]}]).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert example == response - # Verify that response can be converted to an Example - Example._from_dict(example) - - -@responses.activate -def test_delete_example(): - endpoint = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format( - 'boguswid', 'pizza_order', 'Gimme%20a%20pizza%20with%20pepperoni') - url = '{0}{1}'.format(base_url, endpoint) - response = {} - responses.add( - responses.DELETE, - url, - body=json.dumps(response), - status=204, - content_type='') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - example = service.delete_example( - workspace_id='boguswid', - intent='pizza_order', - text='Gimme a pizza with pepperoni').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert example is None - - -@responses.activate -def test_get_example(): - endpoint = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format( - 'boguswid', 'pizza_order', 'Gimme%20a%20pizza%20with%20pepperoni') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "text": "Gimme a pizza with pepperoni", - "created": "2016-07-11T23:53:59.153Z", - "updated": "2016-12-07T18:53:59.153Z" - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - example = service.get_example( - workspace_id='boguswid', - intent='pizza_order', - text='Gimme a pizza with pepperoni').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert example == response - # Verify that response can be converted to an Example - 
Example._from_dict(example) - - -@responses.activate -def test_list_examples(): - endpoint = '/v1/workspaces/{0}/intents/{1}/examples'.format( - 'boguswid', 'pizza_order') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "examples": [{ - "text": "Can I order a pizza?", - "created": "2016-07-11T16:39:01.774Z", - "updated": "2015-12-07T18:53:59.153Z" - }, { - "text": "Gimme a pizza with pepperoni", - "created": "2016-07-11T16:39:01.774Z", - "updated": "2015-12-07T18:53:59.153Z" - }], - "pagination": { - "refresh_url": - "/v1/workspaces/pizza_app-e0f3/intents/order/examples?version=2017-12-18&page_limit=2", - "next_url": - "/v1/workspaces/pizza_app-e0f3/intents/order/examples?cursor=base64=&version=2017-12-18&page_limit=2" + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_workspace(**req_copy) + + def test_create_workspace_value_error_with_retries(self): + # Enable retries and run test_create_workspace_value_error. + _service.enable_retries() + self.test_create_workspace_value_error() + + # Disable retries and run test_create_workspace_value_error. + _service.disable_retries() + self.test_create_workspace_value_error() + + +class TestGetWorkspace: + """ + Test Class for get_workspace + """ + + @responses.activate + def test_get_workspace_all_params(self): + """ + get_workspace() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", 
"status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + export = False + include_audit = False + sort = 'stable' + + # Invoke method + response = _service.get_workspace( + workspace_id, + export=export, + include_audit=include_audit, + sort=sort, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'export={}'.format('true' if export else 'false') in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + + def test_get_workspace_all_params_with_retries(self): + # Enable retries and run test_get_workspace_all_params. + _service.enable_retries() + self.test_get_workspace_all_params() + + # Disable retries and run test_get_workspace_all_params. 
+ _service.disable_retries() + self.test_get_workspace_all_params() + + @responses.activate + def test_get_workspace_required_params(self): + """ + test_get_workspace_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.get_workspace( + workspace_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert 
response.status_code == 200 + + def test_get_workspace_required_params_with_retries(self): + # Enable retries and run test_get_workspace_required_params. + _service.enable_retries() + self.test_get_workspace_required_params() + + # Disable retries and run test_get_workspace_required_params. + _service.disable_retries() + self.test_get_workspace_required_params() + + @responses.activate + def test_get_workspace_value_error(self): + """ + test_get_workspace_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.GET, + url, + 
body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, } - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - examples = service.list_examples( - workspace_id='boguswid', intent='pizza_order').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert examples == response - # Verify that response can be converted to an ExampleCollection - ExampleCollection._from_dict(examples) - - -@responses.activate -def test_update_example(): - endpoint = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format( - 'boguswid', 'pizza_order', 'Gimme%20a%20pizza%20with%20pepperoni') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "text": "Gimme a pizza with pepperoni", - "created": "2016-07-11T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z" - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - example = service.update_example( - workspace_id='boguswid', - intent='pizza_order', - text='Gimme a pizza with pepperoni', - new_text='Gimme a pizza with pepperoni', - new_mentions=[{'entity': 'xxx', 'location': [0, 1]}]).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert example == response - # Verify that response can be converted to an Example - Example._from_dict(example) - - -######################### -# intents -######################### - - -@responses.activate -def test_create_intent(): - endpoint = '/v1/workspaces/{0}/intents'.format('boguswid') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "intent": "pizza_order", - "created": "2015-12-06T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z", - "description": "User wants to start a new pizza order" - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=201, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - intent = service.create_intent( - workspace_id='boguswid', - intent='pizza_order', - description='User wants to start a new pizza order').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert intent == response - # Verify that response can be converted to an Intent - Intent._from_dict(intent) - - -@responses.activate -def test_delete_intent(): - endpoint = '/v1/workspaces/{0}/intents/{1}'.format('boguswid', - 'pizza_order') - url = '{0}{1}'.format(base_url, endpoint) - response = None - responses.add( - responses.DELETE, - url, - body=json.dumps(response), - status=204, - content_type='') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - intent = service.delete_intent( - workspace_id='boguswid', intent='pizza_order').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert intent is None - - -@responses.activate -def test_get_intent(): - endpoint = 
'/v1/workspaces/{0}/intents/{1}'.format('boguswid', - 'pizza_order') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "intent": "pizza_order", - "created": "2015-12-06T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z", - "description": "User wants to start a new pizza order" - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - intent = service.get_intent( - workspace_id='boguswid', intent='pizza_order', export=False).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert intent == response - # Verify that response can be converted to an Intent - Intent._from_dict(intent) - -@responses.activate -def test_list_intents(): - endpoint = '/v1/workspaces/{0}/intents'.format('boguswid') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "intents": [{ - "intent": "pizza_order", - "created": "2015-12-06T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z", - "description": "User wants to start a new pizza order" - }], - "pagination": { - "refresh_url": - "/v1/workspaces/pizza_app-e0f3/intents?version=2017-12-18&page_limit=1", - "next_url": - "/v1/workspaces/pizza_app-e0f3/intents?cursor=base64=&version=2017-12-18&page_limit=1" + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_workspace(**req_copy) + + def test_get_workspace_value_error_with_retries(self): + # Enable retries and run test_get_workspace_value_error. + _service.enable_retries() + self.test_get_workspace_value_error() + + # Disable retries and run test_get_workspace_value_error. 
+ _service.disable_retries() + self.test_get_workspace_value_error() + + +class TestUpdateWorkspace: + """ + Test Class for update_workspace + """ + + @responses.activate + def test_update_workspace_all_params(self): + """ + update_workspace() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a DialogNodeOutputTextValuesElement model + dialog_node_output_text_values_element_model = {} + 
dialog_node_output_text_values_element_model['text'] = 'testString' + + # Construct a dict representation of a ResponseGenericChannel model + response_generic_channel_model = {} + response_generic_channel_model['channel'] = 'chat' + + # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model + dialog_node_output_generic_model = {} + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + # Construct a dict representation of a DialogNodeOutputModifiers model + dialog_node_output_modifiers_model = {} + dialog_node_output_modifiers_model['overwrite'] = True + + # Construct a dict representation of a DialogNodeOutput model + dialog_node_output_model = {} + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeContext model + dialog_node_context_model = {} + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeNextStep model + dialog_node_next_step_model = {} + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + # Construct a dict representation of a DialogNodeAction model + dialog_node_action_model = {} + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + # Construct a dict representation of a DialogNode model + dialog_node_model = {} + dialog_node_model['dialog_node'] = 'testString' + dialog_node_model['description'] = 'testString' + dialog_node_model['conditions'] = 'testString' + dialog_node_model['parent'] = 'testString' + dialog_node_model['previous_sibling'] = 'testString' + dialog_node_model['output'] = dialog_node_output_model + dialog_node_model['context'] = dialog_node_context_model + dialog_node_model['metadata'] = {'anyKey': 'anyValue'} + dialog_node_model['next_step'] = dialog_node_next_step_model + dialog_node_model['title'] = 'testString' + dialog_node_model['type'] = 'standard' + dialog_node_model['event_name'] = 'focus' + dialog_node_model['variable'] = 'testString' + dialog_node_model['actions'] = [dialog_node_action_model] + dialog_node_model['digress_in'] = 'not_available' + dialog_node_model['digress_out'] = 'allow_returning' + dialog_node_model['digress_out_slots'] = 'not_allowed' + dialog_node_model['user_label'] = 'testString' + dialog_node_model['disambiguation_opt_out'] = False + + # Construct a dict representation of a Counterexample model + counterexample_model = {} + counterexample_model['text'] = 'testString' + + # Construct a dict representation of a WorkspaceSystemSettingsTooling model + workspace_system_settings_tooling_model = {} + 
workspace_system_settings_tooling_model['store_generic_responses'] = True + + # Construct a dict representation of a WorkspaceSystemSettingsDisambiguation model + workspace_system_settings_disambiguation_model = {} + workspace_system_settings_disambiguation_model['prompt'] = 'testString' + workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString' + workspace_system_settings_disambiguation_model['enabled'] = False + workspace_system_settings_disambiguation_model['sensitivity'] = 'auto' + workspace_system_settings_disambiguation_model['randomize'] = True + workspace_system_settings_disambiguation_model['max_suggestions'] = 1 + workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString' + + # Construct a dict representation of a WorkspaceSystemSettingsSystemEntities model + workspace_system_settings_system_entities_model = {} + workspace_system_settings_system_entities_model['enabled'] = False + + # Construct a dict representation of a WorkspaceSystemSettingsOffTopic model + workspace_system_settings_off_topic_model = {} + workspace_system_settings_off_topic_model['enabled'] = False + + # Construct a dict representation of a WorkspaceSystemSettingsNlp model + workspace_system_settings_nlp_model = {} + workspace_system_settings_nlp_model['model'] = 'testString' + + # Construct a dict representation of a WorkspaceSystemSettings model + workspace_system_settings_model = {} + workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model + workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model + workspace_system_settings_model['human_agent_assist'] = {'anyKey': 'anyValue'} + workspace_system_settings_model['spelling_suggestions'] = False + workspace_system_settings_model['spelling_auto_correct'] = False + workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model + workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model + workspace_system_settings_model['nlp'] = workspace_system_settings_nlp_model + workspace_system_settings_model['foo'] = 'testString' + + # Construct a dict representation of a WebhookHeader model + webhook_header_model = {} + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + # Construct a dict representation of a Webhook model + webhook_model = {} + webhook_model['url'] = 'testString' + webhook_model['name'] = 'testString' + webhook_model['headers'] = [webhook_header_model] + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Construct a dict representation of a Example model + example_model = {} + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + # Construct a dict representation of a CreateIntent model + create_intent_model = {} + create_intent_model['intent'] = 'testString' + create_intent_model['description'] = 'testString' + create_intent_model['examples'] = [example_model] + + # Construct a dict representation of a CreateValue model + create_value_model = {} + create_value_model['value'] = 'testString' + create_value_model['metadata'] = {'anyKey': 'anyValue'} + create_value_model['type'] = 'synonyms' + create_value_model['synonyms'] = ['testString'] + create_value_model['patterns'] = ['testString'] + + # Construct a dict representation of a CreateEntity model + create_entity_model = {} + 
create_entity_model['entity'] = 'testString' + create_entity_model['description'] = 'testString' + create_entity_model['metadata'] = {'anyKey': 'anyValue'} + create_entity_model['fuzzy_match'] = True + create_entity_model['values'] = [create_value_model] + + # Set up parameter values + workspace_id = 'testString' + name = 'testString' + description = 'testString' + language = 'testString' + dialog_nodes = [dialog_node_model] + counterexamples = [counterexample_model] + metadata = {'anyKey': 'anyValue'} + learning_opt_out = False + system_settings = workspace_system_settings_model + webhooks = [webhook_model] + intents = [create_intent_model] + entities = [create_entity_model] + append = False + include_audit = False + + # Invoke method + response = _service.update_workspace( + workspace_id, + name=name, + description=description, + language=language, + dialog_nodes=dialog_nodes, + counterexamples=counterexamples, + metadata=metadata, + learning_opt_out=learning_opt_out, + system_settings=system_settings, + webhooks=webhooks, + intents=intents, + entities=entities, + append=append, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'append={}'.format('true' if append else 'false') in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['language'] == 'testString' + assert req_body['dialog_nodes'] == [dialog_node_model] + assert req_body['counterexamples'] == [counterexample_model] + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['learning_opt_out'] == False + assert req_body['system_settings'] == workspace_system_settings_model + assert req_body['webhooks'] == [webhook_model] + assert req_body['intents'] == [create_intent_model] + assert req_body['entities'] == [create_entity_model] + + def test_update_workspace_all_params_with_retries(self): + # Enable retries and run test_update_workspace_all_params. + _service.enable_retries() + self.test_update_workspace_all_params() + + # Disable retries and run test_update_workspace_all_params. 
+ _service.disable_retries() + self.test_update_workspace_all_params() + + @responses.activate + def test_update_workspace_required_params(self): + """ + test_update_workspace_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.update_workspace( + workspace_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + 
assert response.status_code == 200 + + def test_update_workspace_required_params_with_retries(self): + # Enable retries and run test_update_workspace_required_params. + _service.enable_retries() + self.test_update_workspace_required_params() + + # Disable retries and run test_update_workspace_required_params. + _service.disable_retries() + self.test_update_workspace_required_params() + + @responses.activate + def test_update_workspace_value_error(self): + """ + test_update_workspace_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + 
responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, } - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - intents = service.list_intents(workspace_id='boguswid', export=False).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert intents == response - # Verify that response can be converted to an IntentCollection - IntentCollection._from_dict(intents) - -@responses.activate -def test_update_intent(): - endpoint = '/v1/workspaces/{0}/intents/{1}'.format('boguswid', - 'pizza_order') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "intent": "pizza_order", - "created": "2015-12-06T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z", - "description": "User wants to start a new pizza order" - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - intent = service.update_intent( - workspace_id='boguswid', - intent='pizza_order', - new_intent='pizza_order', - new_description='User wants to start a new pizza order').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert intent == response - # Verify that response can be converted to an Intent - Intent._from_dict(intent) - -def test_intent_models(): - intent = Intent(intent="pizza_order", - created=datetime.datetime(2015, 12, 6, 23, 53, 59, 15300, tzinfo=tzutc()), - updated=datetime.datetime(2015, 12, 7, 18, 53, 59, 15300, tzinfo=tzutc()), - description="User wants to start a new pizza order") - intentDict = intent._to_dict() - check = Intent._from_dict(intentDict) - assert intent == check - - -######################### -# logs -######################### - - -@responses.activate -def test_list_logs(): - endpoint = '/v1/workspaces/{0}/logs'.format('boguswid') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "logs": [{ - "request": { - "input": { - "text": "Can you turn off the AC" - }, - "context": { - "conversation_id": "f2c7e362-4cc8-4761-8b0f-9ccd70c63bca", - "system": {} - } - }, - "response": { - "input": { - "text": "Can you turn off the AC" - }, - "context": { - "conversation_id": "f2c7e362-4cc8-4761-8b0f-9ccd70c63bca", - "system": { - "dialog_stack": ["root"], - "dialog_turn_counter": 1, - "dialog_request_counter": 1 - }, - "defaultCounter": 0 - }, - "entities": [], - "intents": [{ - "intent": "turn_off", - "confidence": 0.9332477126694649 - }], - "output": { - "log_messages": [], - "text": [ - "Hi. It looks like a nice drive today. What would you like me to do?" 
- ], - "nodes_visited": ["node_1_1467221909631"] - } - }, - "request_timestamp": "2016-07-16T09:22:38.960Z", - "response_timestamp": "2016-07-16T09:22:39.011Z", - "log_id": "e70d6c12-582d-47a8-a6a2-845120a1f232" - }], - "pagination": { - "next_url": - "/v1/workspaces/15fb0e8a-463d-4fec-86aa-a737d9c38a32/logs?cursor=dOfVSuh6fBpDuOxEL9m1S7JKDV7KLuBmRR+lQG1s1i/rVnBZ0ZBVCuy53ruHgPImC31gQv5prUsJ77e0Mj+6sGu/yfusHYF5&version=2016-07-11&filter=response.top_intent:turn_off&page_limit=1", - "matched": - 215 + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_workspace(**req_copy) + + def test_update_workspace_value_error_with_retries(self): + # Enable retries and run test_update_workspace_value_error. + _service.enable_retries() + self.test_update_workspace_value_error() + + # Disable retries and run test_update_workspace_value_error. + _service.disable_retries() + self.test_update_workspace_value_error() + + +class TestDeleteWorkspace: + """ + Test Class for delete_workspace + """ + + @responses.activate + def test_delete_workspace_all_params(self): + """ + delete_workspace() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.delete_workspace( + workspace_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_workspace_all_params_with_retries(self): + # Enable retries and run test_delete_workspace_all_params. + _service.enable_retries() + self.test_delete_workspace_all_params() + + # Disable retries and run test_delete_workspace_all_params. + _service.disable_retries() + self.test_delete_workspace_all_params() + + @responses.activate + def test_delete_workspace_value_error(self): + """ + test_delete_workspace_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, } - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - logs = service.list_logs( - workspace_id='boguswid').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert logs == response - -@responses.activate -def test_list_all_logs(): - endpoint = '/v1/logs' - url = '{0}{1}'.format(base_url, endpoint) - response = { - "logs": [{ - "request": { - "input": { - "text": "Good morning" - }, - "context": { - "metadata": { - "deployment": "deployment_1" - } - } - }, - "response": { - "intents": [{ - "intent": "hello", - "confidence": 1 - }], - "entities": [], - "input": { - "text": "Good morning" - }, - "output": { - "text": ["Hi! 
What can I do for you?"], - "nodes_visited": ["node_2_1501875253968"], - "log_messages": [] - }, - "context": { - "metadata": { - "deployment": "deployment_1" - }, - "conversation_id": "81a43b48-7dca-4a7d-a0d7-6fed03fcee69", - "system": { - "dialog_stack": [{ - "dialog_node": "root" - }], - "dialog_turn_counter": 1, - "dialog_request_counter": 1, - "_node_output_map": { - "node_2_1501875253968": [0] - }, - "branch_exited": True, - "branch_exited_reason": "completed" - } - } - }, - "language": "en", - "workspace_id": "9978a49e-ea89-4493-b33d-82298d3db20d", - "request_timestamp": "2017-09-13T19:52:32.611Z", - "response_timestamp": "2017-09-13T19:52:32.628Z", - "log_id": "aa886a8a-bac5-4b91-8323-2fd61a69c9d3" - }], - "pagination": {} - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - logs = service.list_all_logs( - 'language::en,request.context.metadata.deployment::deployment_1').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert logs == response - - -######################### -# message -######################### - - -@responses.activate -def test_message(): - - assistant = ibm_watson.AssistantV1( - username="username", password="password", version='2016-09-20') - assistant.set_default_headers({'x-watson-learning-opt-out': "true"}) - - workspace_id = 'f8fdbc65-e0bd-4e43-b9f8-2975a366d4ec' - message_url = '%s/v1/workspaces/%s/message' % (base_url, workspace_id) - url1_str = '%s/v1/workspaces/%s/message?version=2016-09-20' - message_url1 = url1_str % (base_url, workspace_id) - message_response = { - "context": { - "conversation_id": "1b7b67c0-90ed-45dc-8508-9488bc483d5b", - "system": { - "dialog_stack": ["root"], - "dialog_turn_counter": 1, - "dialog_request_counter": 1 - } - }, - "intents": [], - "entities": [], - "input": {}, - "output": { - "text": "okay", - "log_messages": [] + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_workspace(**req_copy) + + def test_delete_workspace_value_error_with_retries(self): + # Enable retries and run test_delete_workspace_value_error. + _service.enable_retries() + self.test_delete_workspace_value_error() + + # Disable retries and run test_delete_workspace_value_error. 
+ _service.disable_retries() + self.test_delete_workspace_value_error() + + +class TestCreateWorkspaceAsync: + """ + Test Class for create_workspace_async + """ + + @responses.activate + def test_create_workspace_async_all_params(self): + """ + create_workspace_async() + """ + # Set up mock + url = preprocess_url('/v1/workspaces_async') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Construct a dict representation of a DialogNodeOutputTextValuesElement model + dialog_node_output_text_values_element_model = {} + 
dialog_node_output_text_values_element_model['text'] = 'testString' + + # Construct a dict representation of a ResponseGenericChannel model + response_generic_channel_model = {} + response_generic_channel_model['channel'] = 'chat' + + # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model + dialog_node_output_generic_model = {} + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + # Construct a dict representation of a DialogNodeOutputModifiers model + dialog_node_output_modifiers_model = {} + dialog_node_output_modifiers_model['overwrite'] = True + + # Construct a dict representation of a DialogNodeOutput model + dialog_node_output_model = {} + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeContext model + dialog_node_context_model = {} + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeNextStep model + dialog_node_next_step_model = {} + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + # Construct a dict representation of a DialogNodeAction model + dialog_node_action_model = {} + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + # Construct a dict representation of a DialogNode model + dialog_node_model = {} + dialog_node_model['dialog_node'] = 'testString' + dialog_node_model['description'] = 'testString' + dialog_node_model['conditions'] = 'testString' + dialog_node_model['parent'] = 'testString' + dialog_node_model['previous_sibling'] = 'testString' + dialog_node_model['output'] = dialog_node_output_model + dialog_node_model['context'] = dialog_node_context_model + dialog_node_model['metadata'] = {'anyKey': 'anyValue'} + dialog_node_model['next_step'] = dialog_node_next_step_model + dialog_node_model['title'] = 'testString' + dialog_node_model['type'] = 'standard' + dialog_node_model['event_name'] = 'focus' + dialog_node_model['variable'] = 'testString' + dialog_node_model['actions'] = [dialog_node_action_model] + dialog_node_model['digress_in'] = 'not_available' + dialog_node_model['digress_out'] = 'allow_returning' + dialog_node_model['digress_out_slots'] = 'not_allowed' + dialog_node_model['user_label'] = 'testString' + dialog_node_model['disambiguation_opt_out'] = False + + # Construct a dict representation of a Counterexample model + counterexample_model = {} + counterexample_model['text'] = 'testString' + + # Construct a dict representation of a WorkspaceSystemSettingsTooling model + workspace_system_settings_tooling_model = {} + 
workspace_system_settings_tooling_model['store_generic_responses'] = True + + # Construct a dict representation of a WorkspaceSystemSettingsDisambiguation model + workspace_system_settings_disambiguation_model = {} + workspace_system_settings_disambiguation_model['prompt'] = 'testString' + workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString' + workspace_system_settings_disambiguation_model['enabled'] = False + workspace_system_settings_disambiguation_model['sensitivity'] = 'auto' + workspace_system_settings_disambiguation_model['randomize'] = True + workspace_system_settings_disambiguation_model['max_suggestions'] = 1 + workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString' + + # Construct a dict representation of a WorkspaceSystemSettingsSystemEntities model + workspace_system_settings_system_entities_model = {} + workspace_system_settings_system_entities_model['enabled'] = False + + # Construct a dict representation of a WorkspaceSystemSettingsOffTopic model + workspace_system_settings_off_topic_model = {} + workspace_system_settings_off_topic_model['enabled'] = False + + # Construct a dict representation of a WorkspaceSystemSettingsNlp model + workspace_system_settings_nlp_model = {} + workspace_system_settings_nlp_model['model'] = 'testString' + + # Construct a dict representation of a WorkspaceSystemSettings model + workspace_system_settings_model = {} + workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model + workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model + workspace_system_settings_model['human_agent_assist'] = {'anyKey': 'anyValue'} + workspace_system_settings_model['spelling_suggestions'] = False + workspace_system_settings_model['spelling_auto_correct'] = False + workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model + workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model + workspace_system_settings_model['nlp'] = workspace_system_settings_nlp_model + workspace_system_settings_model['foo'] = 'testString' + + # Construct a dict representation of a WebhookHeader model + webhook_header_model = {} + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + # Construct a dict representation of a Webhook model + webhook_model = {} + webhook_model['url'] = 'testString' + webhook_model['name'] = 'testString' + webhook_model['headers'] = [webhook_header_model] + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Construct a dict representation of a Example model + example_model = {} + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + # Construct a dict representation of a CreateIntent model + create_intent_model = {} + create_intent_model['intent'] = 'testString' + create_intent_model['description'] = 'testString' + create_intent_model['examples'] = [example_model] + + # Construct a dict representation of a CreateValue model + create_value_model = {} + create_value_model['value'] = 'testString' + create_value_model['metadata'] = {'anyKey': 'anyValue'} + create_value_model['type'] = 'synonyms' + create_value_model['synonyms'] = ['testString'] + create_value_model['patterns'] = ['testString'] + + # Construct a dict representation of a CreateEntity model + create_entity_model = {} + 
create_entity_model['entity'] = 'testString' + create_entity_model['description'] = 'testString' + create_entity_model['metadata'] = {'anyKey': 'anyValue'} + create_entity_model['fuzzy_match'] = True + create_entity_model['values'] = [create_value_model] + + # Set up parameter values + name = 'testString' + description = 'testString' + language = 'testString' + dialog_nodes = [dialog_node_model] + counterexamples = [counterexample_model] + metadata = {'anyKey': 'anyValue'} + learning_opt_out = False + system_settings = workspace_system_settings_model + webhooks = [webhook_model] + intents = [create_intent_model] + entities = [create_entity_model] + + # Invoke method + response = _service.create_workspace_async( + name=name, + description=description, + language=language, + dialog_nodes=dialog_nodes, + counterexamples=counterexamples, + metadata=metadata, + learning_opt_out=learning_opt_out, + system_settings=system_settings, + webhooks=webhooks, + intents=intents, + entities=entities, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['language'] == 'testString' + assert req_body['dialog_nodes'] == [dialog_node_model] + assert req_body['counterexamples'] == [counterexample_model] + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['learning_opt_out'] == False + assert req_body['system_settings'] == workspace_system_settings_model + assert req_body['webhooks'] == [webhook_model] + assert req_body['intents'] == [create_intent_model] + assert req_body['entities'] == [create_entity_model] + + def test_create_workspace_async_all_params_with_retries(self): + # Enable retries and run test_create_workspace_async_all_params. + _service.enable_retries() + self.test_create_workspace_async_all_params() + + # Disable retries and run test_create_workspace_async_all_params. 
+ _service.disable_retries() + self.test_create_workspace_async_all_params() + + @responses.activate + def test_create_workspace_async_required_params(self): + """ + test_create_workspace_async_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces_async') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Invoke method + response = _service.create_workspace_async() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + + def 
test_create_workspace_async_required_params_with_retries(self): + # Enable retries and run test_create_workspace_async_required_params. + _service.enable_retries() + self.test_create_workspace_async_required_params() + + # Disable retries and run test_create_workspace_async_required_params. + _service.disable_retries() + self.test_create_workspace_async_required_params() + + @responses.activate + def test_create_workspace_async_value_error(self): + """ + test_create_workspace_async_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces_async') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.POST, + 
url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Pass in all but one required param and check for a ValueError + req_param_dict = { } - } - - responses.add( - responses.POST, - message_url, - body=json.dumps(message_response), - status=200, - content_type='application/json') - - message = assistant.message( - workspace_id=workspace_id, - input={'text': 'Turn on the lights'}, - context=None).get_result() - - assert message is not None - assert responses.calls[0].request.url == message_url1 - assert 'x-watson-learning-opt-out' in responses.calls[0].request.headers - assert responses.calls[0].request.headers['x-watson-learning-opt-out'] == 'true' - assert responses.calls[0].response.text == json.dumps(message_response) - - # test context - responses.add( - responses.POST, - message_url, - body=message_response, - status=200, - content_type='application/json') - - message_ctx = { - 'context': { - 'conversation_id': '1b7b67c0-90ed-45dc-8508-9488bc483d5b', - 'system': { - 'dialog_stack': ['root'], - 'dialog_turn_counter': 2, - 'dialog_request_counter': 1 - } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_workspace_async(**req_copy) + + def test_create_workspace_async_value_error_with_retries(self): + # Enable retries and run test_create_workspace_async_value_error. + _service.enable_retries() + self.test_create_workspace_async_value_error() + + # Disable retries and run test_create_workspace_async_value_error. + _service.disable_retries() + self.test_create_workspace_async_value_error() + + +class TestUpdateWorkspaceAsync: + """ + Test Class for update_workspace_async + """ + + @responses.activate + def test_update_workspace_async_all_params(self): + """ + update_workspace_async() + """ + # Set up mock + url = preprocess_url('/v1/workspaces_async/testString') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": 
{"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Construct a dict representation of a DialogNodeOutputTextValuesElement model + dialog_node_output_text_values_element_model = {} + dialog_node_output_text_values_element_model['text'] = 'testString' + + # Construct a dict representation of a ResponseGenericChannel model + response_generic_channel_model = {} + response_generic_channel_model['channel'] = 'chat' + + # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model + dialog_node_output_generic_model = {} + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + # Construct a dict representation of a DialogNodeOutputModifiers model + dialog_node_output_modifiers_model = {} + dialog_node_output_modifiers_model['overwrite'] = True + + # Construct a dict representation of a DialogNodeOutput model + dialog_node_output_model = {} + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeContext model + dialog_node_context_model = {} + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeNextStep model + dialog_node_next_step_model = {} + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + # Construct a dict representation of a DialogNodeAction model + dialog_node_action_model = {} + dialog_node_action_model['name'] = 
'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + # Construct a dict representation of a DialogNode model + dialog_node_model = {} + dialog_node_model['dialog_node'] = 'testString' + dialog_node_model['description'] = 'testString' + dialog_node_model['conditions'] = 'testString' + dialog_node_model['parent'] = 'testString' + dialog_node_model['previous_sibling'] = 'testString' + dialog_node_model['output'] = dialog_node_output_model + dialog_node_model['context'] = dialog_node_context_model + dialog_node_model['metadata'] = {'anyKey': 'anyValue'} + dialog_node_model['next_step'] = dialog_node_next_step_model + dialog_node_model['title'] = 'testString' + dialog_node_model['type'] = 'standard' + dialog_node_model['event_name'] = 'focus' + dialog_node_model['variable'] = 'testString' + dialog_node_model['actions'] = [dialog_node_action_model] + dialog_node_model['digress_in'] = 'not_available' + dialog_node_model['digress_out'] = 'allow_returning' + dialog_node_model['digress_out_slots'] = 'not_allowed' + dialog_node_model['user_label'] = 'testString' + dialog_node_model['disambiguation_opt_out'] = False + + # Construct a dict representation of a Counterexample model + counterexample_model = {} + counterexample_model['text'] = 'testString' + + # Construct a dict representation of a WorkspaceSystemSettingsTooling model + workspace_system_settings_tooling_model = {} + workspace_system_settings_tooling_model['store_generic_responses'] = True + + # Construct a dict representation of a WorkspaceSystemSettingsDisambiguation model + workspace_system_settings_disambiguation_model = {} + workspace_system_settings_disambiguation_model['prompt'] = 'testString' + workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString' + workspace_system_settings_disambiguation_model['enabled'] = False + workspace_system_settings_disambiguation_model['sensitivity'] = 'auto' + workspace_system_settings_disambiguation_model['randomize'] = True + workspace_system_settings_disambiguation_model['max_suggestions'] = 1 + workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString' + + # Construct a dict representation of a WorkspaceSystemSettingsSystemEntities model + workspace_system_settings_system_entities_model = {} + workspace_system_settings_system_entities_model['enabled'] = False + + # Construct a dict representation of a WorkspaceSystemSettingsOffTopic model + workspace_system_settings_off_topic_model = {} + workspace_system_settings_off_topic_model['enabled'] = False + + # Construct a dict representation of a WorkspaceSystemSettingsNlp model + workspace_system_settings_nlp_model = {} + workspace_system_settings_nlp_model['model'] = 'testString' + + # Construct a dict representation of a WorkspaceSystemSettings model + workspace_system_settings_model = {} + workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model + workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model + workspace_system_settings_model['human_agent_assist'] = {'anyKey': 'anyValue'} + workspace_system_settings_model['spelling_suggestions'] = False + workspace_system_settings_model['spelling_auto_correct'] = False + workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model + 
workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model + workspace_system_settings_model['nlp'] = workspace_system_settings_nlp_model + workspace_system_settings_model['foo'] = 'testString' + + # Construct a dict representation of a WebhookHeader model + webhook_header_model = {} + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + # Construct a dict representation of a Webhook model + webhook_model = {} + webhook_model['url'] = 'testString' + webhook_model['name'] = 'testString' + webhook_model['headers'] = [webhook_header_model] + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Construct a dict representation of a Example model + example_model = {} + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + # Construct a dict representation of a CreateIntent model + create_intent_model = {} + create_intent_model['intent'] = 'testString' + create_intent_model['description'] = 'testString' + create_intent_model['examples'] = [example_model] + + # Construct a dict representation of a CreateValue model + create_value_model = {} + create_value_model['value'] = 'testString' + create_value_model['metadata'] = {'anyKey': 'anyValue'} + create_value_model['type'] = 'synonyms' + create_value_model['synonyms'] = ['testString'] + create_value_model['patterns'] = ['testString'] + + # Construct a dict representation of a CreateEntity model + create_entity_model = {} + create_entity_model['entity'] = 'testString' + create_entity_model['description'] = 'testString' + create_entity_model['metadata'] = {'anyKey': 'anyValue'} + create_entity_model['fuzzy_match'] = True + create_entity_model['values'] = [create_value_model] + + # Set up parameter values + workspace_id = 'testString' + name = 'testString' + description = 'testString' + language = 'testString' + dialog_nodes = [dialog_node_model] + counterexamples = [counterexample_model] + metadata = {'anyKey': 'anyValue'} + learning_opt_out = False + system_settings = workspace_system_settings_model + webhooks = [webhook_model] + intents = [create_intent_model] + entities = [create_entity_model] + append = False + + # Invoke method + response = _service.update_workspace_async( + workspace_id, + name=name, + description=description, + language=language, + dialog_nodes=dialog_nodes, + counterexamples=counterexamples, + metadata=metadata, + learning_opt_out=learning_opt_out, + system_settings=system_settings, + webhooks=webhooks, + intents=intents, + entities=entities, + append=append, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'append={}'.format('true' if append else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['language'] == 'testString' + assert req_body['dialog_nodes'] == [dialog_node_model] + assert req_body['counterexamples'] == [counterexample_model] + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['learning_opt_out'] == False + assert req_body['system_settings'] == workspace_system_settings_model + 
assert req_body['webhooks'] == [webhook_model] + assert req_body['intents'] == [create_intent_model] + assert req_body['entities'] == [create_entity_model] + + def test_update_workspace_async_all_params_with_retries(self): + # Enable retries and run test_update_workspace_async_all_params. + _service.enable_retries() + self.test_update_workspace_async_all_params() + + # Disable retries and run test_update_workspace_async_all_params. + _service.disable_retries() + self.test_update_workspace_async_all_params() + + @responses.activate + def test_update_workspace_async_required_params(self): + """ + test_update_workspace_async_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces_async/testString') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": 
"2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.update_workspace_async( + workspace_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + + def test_update_workspace_async_required_params_with_retries(self): + # Enable retries and run test_update_workspace_async_required_params. + _service.enable_retries() + self.test_update_workspace_async_required_params() + + # Disable retries and run test_update_workspace_async_required_params. + _service.disable_retries() + self.test_update_workspace_async_required_params() + + @responses.activate + def test_update_workspace_async_value_error(self): + """ + test_update_workspace_async_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces_async/testString') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": 
"2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_workspace_async(**req_copy) + + def test_update_workspace_async_value_error_with_retries(self): + # Enable retries and run test_update_workspace_async_value_error. + _service.enable_retries() + self.test_update_workspace_async_value_error() + + # Disable retries and run test_update_workspace_async_value_error. + _service.disable_retries() + self.test_update_workspace_async_value_error() + + +class TestExportWorkspaceAsync: + """ + Test Class for export_workspace_async + """ + + @responses.activate + def test_export_workspace_async_all_params(self): + """ + export_workspace_async() + """ + # Set up mock + url = preprocess_url('/v1/workspaces_async/testString/export') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, 
"spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + include_audit = False + sort = 'stable' + verbose = False + + # Invoke method + response = _service.export_workspace_async( + workspace_id, + include_audit=include_audit, + sort=sort, + verbose=verbose, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'verbose={}'.format('true' if verbose else 'false') in query_string + + def test_export_workspace_async_all_params_with_retries(self): + # Enable retries and run test_export_workspace_async_all_params. + _service.enable_retries() + self.test_export_workspace_async_all_params() + + # Disable retries and run test_export_workspace_async_all_params. 
+ _service.disable_retries() + self.test_export_workspace_async_all_params() + + @responses.activate + def test_export_workspace_async_required_params(self): + """ + test_export_workspace_async_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces_async/testString/export') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.export_workspace_async( + workspace_id, + headers={}, + ) + + # Check for correct operation 
+ assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_export_workspace_async_required_params_with_retries(self): + # Enable retries and run test_export_workspace_async_required_params. + _service.enable_retries() + self.test_export_workspace_async_required_params() + + # Disable retries and run test_export_workspace_async_required_params. + _service.disable_retries() + self.test_export_workspace_async_required_params() + + @responses.activate + def test_export_workspace_async_value_error(self): + """ + test_export_workspace_async_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces_async/testString/export') + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": 
"2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.export_workspace_async(**req_copy) + + def test_export_workspace_async_value_error_with_retries(self): + # Enable retries and run test_export_workspace_async_value_error. + _service.enable_retries() + self.test_export_workspace_async_value_error() + + # Disable retries and run test_export_workspace_async_value_error. + _service.disable_retries() + self.test_export_workspace_async_value_error() + + +# endregion +############################################################################## +# End of Service: Workspaces +############################################################################## + +############################################################################## +# Start of Service: Intents +############################################################################## +# region + + +class TestListIntents: + """ + Test Class for list_intents + """ + + @responses.activate + def test_list_intents_all_params(self): + """ + list_intents() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents') + mock_response = '{"intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + export = False + page_limit = 100 + include_count = False + sort = 'intent' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_intents( + workspace_id, + export=export, + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'export={}'.format('true' if export else 'false') in query_string + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_intents_all_params_with_retries(self): + # Enable retries and run test_list_intents_all_params. + _service.enable_retries() + self.test_list_intents_all_params() + + # Disable retries and run test_list_intents_all_params. 
+ _service.disable_retries() + self.test_list_intents_all_params() + + @responses.activate + def test_list_intents_required_params(self): + """ + test_list_intents_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents') + mock_response = '{"intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.list_intents( + workspace_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_intents_required_params_with_retries(self): + # Enable retries and run test_list_intents_required_params. + _service.enable_retries() + self.test_list_intents_required_params() + + # Disable retries and run test_list_intents_required_params. + _service.disable_retries() + self.test_list_intents_required_params() + + @responses.activate + def test_list_intents_value_error(self): + """ + test_list_intents_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents') + mock_response = '{"intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_intents(**req_copy) + + def test_list_intents_value_error_with_retries(self): + # Enable retries and run test_list_intents_value_error. + _service.enable_retries() + self.test_list_intents_value_error() + + # Disable retries and run test_list_intents_value_error. 
+ _service.disable_retries() + self.test_list_intents_value_error() + + +class TestCreateIntent: + """ + Test Class for create_intent + """ + + @responses.activate + def test_create_intent_all_params(self): + """ + create_intent() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents') + mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Construct a dict representation of a Example model + example_model = {} + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + description = 'testString' + examples = [example_model] + include_audit = False + + # Invoke method + response = _service.create_intent( + workspace_id, + intent, + description=description, + examples=examples, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['intent'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['examples'] == [example_model] + + def test_create_intent_all_params_with_retries(self): + # Enable retries and run test_create_intent_all_params. + _service.enable_retries() + self.test_create_intent_all_params() + + # Disable retries and run test_create_intent_all_params. 
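# test_create_intent_all_params above POSTs an intent with nested Example and
# Mention dicts and then checks the serialized request body. A hedged sketch of
# the equivalent SDK call; the client setup and all identifiers are placeholders.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

# A mention marks the character range of an example that refers to an entity.
example = {
    'text': 'turn on the kitchen lights',
    'mentions': [{'entity': 'appliance', 'location': [12, 26]}],
}
intent = assistant.create_intent('{workspace_id}',
                                 'turn_on',
                                 description='User wants to switch something on',
                                 examples=[example]).get_result()
print(intent['intent'])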
+ _service.disable_retries() + self.test_create_intent_all_params() + + @responses.activate + def test_create_intent_required_params(self): + """ + test_create_intent_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents') + mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Construct a dict representation of a Example model + example_model = {} + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + description = 'testString' + examples = [example_model] + + # Invoke method + response = _service.create_intent( + workspace_id, + intent, + description=description, + examples=examples, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['intent'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['examples'] == [example_model] + + def test_create_intent_required_params_with_retries(self): + # Enable retries and run test_create_intent_required_params. + _service.enable_retries() + self.test_create_intent_required_params() + + # Disable retries and run test_create_intent_required_params. + _service.disable_retries() + self.test_create_intent_required_params() + + @responses.activate + def test_create_intent_value_error(self): + """ + test_create_intent_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents') + mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Construct a dict representation of a Example model + example_model = {} + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + description = 'testString' + examples = [example_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "intent": intent, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_intent(**req_copy) + + def test_create_intent_value_error_with_retries(self): + # Enable retries and run test_create_intent_value_error. 
+ _service.enable_retries() + self.test_create_intent_value_error() + + # Disable retries and run test_create_intent_value_error. + _service.disable_retries() + self.test_create_intent_value_error() + + +class TestGetIntent: + """ + Test Class for get_intent + """ + + @responses.activate + def test_get_intent_all_params(self): + """ + get_intent() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString') + mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + export = False + include_audit = False + + # Invoke method + response = _service.get_intent( + workspace_id, + intent, + export=export, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'export={}'.format('true' if export else 'false') in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_get_intent_all_params_with_retries(self): + # Enable retries and run test_get_intent_all_params. + _service.enable_retries() + self.test_get_intent_all_params() + + # Disable retries and run test_get_intent_all_params. + _service.disable_retries() + self.test_get_intent_all_params() + + @responses.activate + def test_get_intent_required_params(self): + """ + test_get_intent_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString') + mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + + # Invoke method + response = _service.get_intent( + workspace_id, + intent, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_intent_required_params_with_retries(self): + # Enable retries and run test_get_intent_required_params. + _service.enable_retries() + self.test_get_intent_required_params() + + # Disable retries and run test_get_intent_required_params. 
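# TestGetIntent above mocks GET '/v1/workspaces/{id}/intents/{intent}' and
# checks the export and include_audit query parameters. The live equivalent,
# sketched with placeholder credentials and identifiers:
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

# export=True returns the intent's user examples as well as its metadata.
intent = assistant.get_intent('{workspace_id}', 'turn_on',
                              export=True,
                              include_audit=True).get_result()
print(intent.get('examples', []))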
+ _service.disable_retries() + self.test_get_intent_required_params() + + @responses.activate + def test_get_intent_value_error(self): + """ + test_get_intent_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString') + mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "intent": intent, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_intent(**req_copy) + + def test_get_intent_value_error_with_retries(self): + # Enable retries and run test_get_intent_value_error. + _service.enable_retries() + self.test_get_intent_value_error() + + # Disable retries and run test_get_intent_value_error. + _service.disable_retries() + self.test_get_intent_value_error() + + +class TestUpdateIntent: + """ + Test Class for update_intent + """ + + @responses.activate + def test_update_intent_all_params(self): + """ + update_intent() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString') + mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Construct a dict representation of a Example model + example_model = {} + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + new_intent = 'testString' + new_description = 'testString' + new_examples = [example_model] + append = False + include_audit = False + + # Invoke method + response = _service.update_intent( + workspace_id, + intent, + new_intent=new_intent, + new_description=new_description, + new_examples=new_examples, + append=append, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'append={}'.format('true' if append else 'false') in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['intent'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['examples'] == [example_model] + + def 
test_update_intent_all_params_with_retries(self): + # Enable retries and run test_update_intent_all_params. + _service.enable_retries() + self.test_update_intent_all_params() + + # Disable retries and run test_update_intent_all_params. + _service.disable_retries() + self.test_update_intent_all_params() + + @responses.activate + def test_update_intent_required_params(self): + """ + test_update_intent_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString') + mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Construct a dict representation of a Example model + example_model = {} + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + new_intent = 'testString' + new_description = 'testString' + new_examples = [example_model] + + # Invoke method + response = _service.update_intent( + workspace_id, + intent, + new_intent=new_intent, + new_description=new_description, + new_examples=new_examples, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['intent'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['examples'] == [example_model] + + def test_update_intent_required_params_with_retries(self): + # Enable retries and run test_update_intent_required_params. + _service.enable_retries() + self.test_update_intent_required_params() + + # Disable retries and run test_update_intent_required_params. 
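# The update_intent tests above send new_intent/new_description/new_examples and
# validate both the 'append' query parameter and the request body. A sketch of
# the same update against a live workspace, with placeholders throughout; per
# the v1 API, append=False replaces the existing examples while append=True
# extends them.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

updated = assistant.update_intent('{workspace_id}', 'turn_on',
                                  new_description='Switch a device on',
                                  new_examples=[{'text': 'switch on the fan'}],
                                  append=True,
                                  include_audit=True).get_result()
print(updated['description'])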
+ _service.disable_retries() + self.test_update_intent_required_params() + + @responses.activate + def test_update_intent_value_error(self): + """ + test_update_intent_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString') + mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Construct a dict representation of a Example model + example_model = {} + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + new_intent = 'testString' + new_description = 'testString' + new_examples = [example_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "intent": intent, } - } - message = assistant.message( - workspace_id=workspace_id, - input={'text': 'Turn on the lights'}, - context=json.dumps(message_ctx['context'])).get_result() - - assert message is not None - assert responses.calls[1].request.url == message_url1 - assert responses.calls[1].response.text == json.dumps(message_response) - - assert len(responses.calls) == 2 - -@responses.activate -def test_message_with_models(): - - assistant = ibm_watson.AssistantV1( - username="username", password="password", version='2016-09-20') - assistant.set_default_headers({'x-watson-learning-opt-out': "true"}) - - workspace_id = 'f8fdbc65-e0bd-4e43-b9f8-2975a366d4ec' - message_url = '%s/v1/workspaces/%s/message' % (base_url, workspace_id) - url1_str = '%s/v1/workspaces/%s/message?version=2016-09-20' - message_url1 = url1_str % (base_url, workspace_id) - message_response = { - "context": { - "conversation_id": "1b7b67c0-90ed-45dc-8508-9488bc483d5b", - "system": { - "dialog_stack": ["root"], - "dialog_turn_counter": 1, - "dialog_request_counter": 1 - } - }, - "intents": [], - "entities": [], - "input": {}, - "output": { - "text": "okay", - "log_messages": [] + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_intent(**req_copy) + + def test_update_intent_value_error_with_retries(self): + # Enable retries and run test_update_intent_value_error. + _service.enable_retries() + self.test_update_intent_value_error() + + # Disable retries and run test_update_intent_value_error. 
+ _service.disable_retries() + self.test_update_intent_value_error() + + +class TestDeleteIntent: + """ + Test Class for delete_intent + """ + + @responses.activate + def test_delete_intent_all_params(self): + """ + delete_intent() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + + # Invoke method + response = _service.delete_intent( + workspace_id, + intent, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_intent_all_params_with_retries(self): + # Enable retries and run test_delete_intent_all_params. + _service.enable_retries() + self.test_delete_intent_all_params() + + # Disable retries and run test_delete_intent_all_params. + _service.disable_retries() + self.test_delete_intent_all_params() + + @responses.activate + def test_delete_intent_value_error(self): + """ + test_delete_intent_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "intent": intent, } - } - - responses.add( - responses.POST, - message_url, - body=json.dumps(message_response), - status=200, - content_type='application/json') - - message = assistant.message( - workspace_id=workspace_id, - input=MessageInput(text='Turn on the lights'), - context=None).get_result() - - assert message is not None - assert responses.calls[0].request.url == message_url1 - assert 'x-watson-learning-opt-out' in responses.calls[0].request.headers - assert responses.calls[0].request.headers['x-watson-learning-opt-out'] == 'true' - assert responses.calls[0].response.text == json.dumps(message_response) - - # test context - responses.add( - responses.POST, - message_url, - body=message_response, - status=200, - content_type='application/json') - - message_ctx = Context._from_dict(message_response['context']) - message = assistant.message( - workspace_id=workspace_id, - input=MessageInput(text='Turn on the lights'), - context=message_ctx).get_result() - - assert message is not None - assert responses.calls[1].request.url == message_url1 - assert responses.calls[1].response.text == json.dumps(message_response) - - assert len(responses.calls) == 2 - - -######################### -# synonyms -######################### - - -@responses.activate -def test_create_synonym(): - endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms'.format( - 'boguswid', 'aeiou', 'vowel') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "synonym": "aeiou", - "created": "2000-01-23T04:56:07.000+00:00", - "updated": "2000-01-23T04:56:07.000+00:00" - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=201, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - synonym = service.create_synonym( - workspace_id='boguswid', entity='aeiou', value='vowel', synonym='a').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert synonym == response - # Verify that response can be converted to a 
Synonym - Synonym._from_dict(synonym) - -@responses.activate -def test_delete_synonym(): - endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format( - 'boguswid', 'aeiou', 'vowel', 'a') - url = '{0}{1}'.format(base_url, endpoint) - response = None - responses.add( - responses.DELETE, - url, - body=json.dumps(response), - status=204, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - synonym = service.delete_synonym( - workspace_id='boguswid', entity='aeiou', value='vowel', synonym='a').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert synonym is None - - -@responses.activate -def test_get_synonym(): - endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format( - 'boguswid', 'grilling', 'bbq', 'barbecue') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "synonym": "barbecue", - "created": "2015-12-06T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z" - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - synonym = service.get_synonym( - workspace_id='boguswid', entity='grilling', value='bbq', synonym='barbecue').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert synonym == response - # Verify that response can be converted to a Synonym - Synonym._from_dict(synonym) - - -@responses.activate -def test_list_synonyms(): - endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms'.format( - 'boguswid', 'grilling', 'bbq') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "synonyms": [{ - "synonym": "BBQ sauce", - "created": "2015-12-06T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z" - }, { - "synonym": "barbecue", - "created": "2015-12-06T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z" - }], - "pagination": { - "refresh_url": - "/v1/workspaces/pizza_app-e0f3/entities/sauce/values/types/synonyms?version=2017-12-18&filter=name:b&include_count=true&page_limit=2", - "next_url": - "/v1/workspaces/pizza_app-e0f3/entities/sauce/values/types/synonyms?cursor=base64=&version=2017-12-18&filter=name:b&page_limit=2", - "total": - 8, - "matched": - 2 + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_intent(**req_copy) + + def test_delete_intent_value_error_with_retries(self): + # Enable retries and run test_delete_intent_value_error. + _service.enable_retries() + self.test_delete_intent_value_error() + + # Disable retries and run test_delete_intent_value_error. 
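# TestDeleteIntent above needs only workspace_id and intent; the mocked DELETE
# returns 200 with no body. The live equivalent, sketched with placeholder
# client setup and identifiers:
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

assistant.delete_intent('{workspace_id}', 'turn_on')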
+ _service.disable_retries() + self.test_delete_intent_value_error() + + +# endregion +############################################################################## +# End of Service: Intents +############################################################################## + +############################################################################## +# Start of Service: Examples +############################################################################## +# region + + +class TestListExamples: + """ + Test Class for list_examples + """ + + @responses.activate + def test_list_examples_all_params(self): + """ + list_examples() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples') + mock_response = '{"examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + page_limit = 100 + include_count = False + sort = 'text' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_examples( + workspace_id, + intent, + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_examples_all_params_with_retries(self): + # Enable retries and run test_list_examples_all_params. + _service.enable_retries() + self.test_list_examples_all_params() + + # Disable retries and run test_list_examples_all_params. 
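# list_examples above is mocked with a pagination object that includes
# next_cursor. A sketch of cursor-based paging built on that response shape;
# the client setup and workspace/intent names are placeholders, and the paging
# loop is an illustration rather than code from this suite.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

cursor = None
while True:
    page = assistant.list_examples('{workspace_id}', 'turn_on',
                                   page_limit=100,
                                   cursor=cursor).get_result()
    for example in page['examples']:
        print(example['text'])
    cursor = page['pagination'].get('next_cursor')
    if not cursor:
        break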
+ _service.disable_retries() + self.test_list_examples_all_params() + + @responses.activate + def test_list_examples_required_params(self): + """ + test_list_examples_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples') + mock_response = '{"examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + + # Invoke method + response = _service.list_examples( + workspace_id, + intent, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_examples_required_params_with_retries(self): + # Enable retries and run test_list_examples_required_params. + _service.enable_retries() + self.test_list_examples_required_params() + + # Disable retries and run test_list_examples_required_params. + _service.disable_retries() + self.test_list_examples_required_params() + + @responses.activate + def test_list_examples_value_error(self): + """ + test_list_examples_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples') + mock_response = '{"examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "intent": intent, } - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - synonyms = service.list_synonyms( - workspace_id='boguswid', - entity='grilling', - value='bbq').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert synonyms == response - # Verify that response can be converted to a SynonymCollection - SynonymCollection._from_dict(synonyms) - - -@responses.activate -def test_update_synonym(): - endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format( - 'boguswid', 'grilling', 'bbq', 'barbecue') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "synonym": "barbecue", - "created": "2015-12-06T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z" - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - synonym = service.update_synonym( - workspace_id='boguswid', entity='grilling', value='bbq', synonym='barbecue', 
new_synonym='barbecue').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert synonym == response - # Verify that response can be converted to a Synonym - Synonym._from_dict(synonym) - - -######################### -# values -######################### - - -@responses.activate -def test_create_value(): - endpoint = '/v1/workspaces/{0}/entities/{1}/values'.format('boguswid', 'grilling') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "metadata": "{}", - "created": "2000-01-23T04:56:07.000+00:00", - "value": "aeiou", - "type": "synonyms", - "updated": "2000-01-23T04:56:07.000+00:00" - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=201, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - value = service.create_value( - workspace_id='boguswid', - entity='grilling', - value='aeiou').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert value == response - # Verify that response can be converted to a Value - Value._from_dict(value) - - -@responses.activate -def test_delete_value(): - endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format( - 'boguswid', 'grilling', 'bbq') - url = '{0}{1}'.format(base_url, endpoint) - response = "" - responses.add( - responses.DELETE, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - value = service.delete_value( - workspace_id='boguswid', entity='grilling', value='bbq').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert value == "" - - -@responses.activate -def test_get_value(): - endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format( - 'boguswid', 'grilling', 'bbq') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "value": "BBQ sauce", - "metadata": { - "code": 1422 - }, - "type": "synonyms", - "created": "2015-12-06T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z" - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - value = service.get_value( - workspace_id='boguswid', entity='grilling', value='bbq', export=True).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert value == response - # Verify that response can be converted to a Value - Value._from_dict(value) - - -@responses.activate -def test_list_values(): - endpoint = '/v1/workspaces/{0}/entities/{1}/values'.format('boguswid', 'grilling') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "values": [{ - "value": "BBQ sauce", - "metadata": { - "code": 1422 - }, - "type": "synonyms", - "created": "2015-12-06T23:53:59.153Z", - "updated": "2015-12-07T18:53:59.153Z" - }], - "pagination": { - "refresh_url": - "/v1/workspaces/pizza_app-e0f3/entities/sauce/values?version=2017-12-18&filter=name:pizza&include_count=true&page_limit=1", - "next_url": - "/v1/workspaces/pizza_app-e0f3/sauce/values?cursor=base64=&version=2017-12-18&filter=name:pizza&page_limit=1", - "total": - 1, - "matched": - 1 + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in 
req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_examples(**req_copy) + + def test_list_examples_value_error_with_retries(self): + # Enable retries and run test_list_examples_value_error. + _service.enable_retries() + self.test_list_examples_value_error() + + # Disable retries and run test_list_examples_value_error. + _service.disable_retries() + self.test_list_examples_value_error() + + +class TestCreateExample: + """ + Test Class for create_example + """ + + @responses.activate + def test_create_example_all_params(self): + """ + create_example() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples') + mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + text = 'testString' + mentions = [mention_model] + include_audit = False + + # Invoke method + response = _service.create_example( + workspace_id, + intent, + text, + mentions=mentions, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['text'] == 'testString' + assert req_body['mentions'] == [mention_model] + + def test_create_example_all_params_with_retries(self): + # Enable retries and run test_create_example_all_params. + _service.enable_retries() + self.test_create_example_all_params() + + # Disable retries and run test_create_example_all_params. 
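# test_create_example_all_params above POSTs an example with optional Mention
# entries and checks the serialized body. Sketch of the same call against a
# live service, using placeholder credentials and identifiers:
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

example = assistant.create_example(
    '{workspace_id}', 'turn_on', 'please turn on the radio',
    mentions=[{'entity': 'appliance', 'location': [19, 24]}],
).get_result()
print(example['text'])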
+ _service.disable_retries() + self.test_create_example_all_params() + + @responses.activate + def test_create_example_required_params(self): + """ + test_create_example_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples') + mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + text = 'testString' + mentions = [mention_model] + + # Invoke method + response = _service.create_example( + workspace_id, + intent, + text, + mentions=mentions, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['text'] == 'testString' + assert req_body['mentions'] == [mention_model] + + def test_create_example_required_params_with_retries(self): + # Enable retries and run test_create_example_required_params. + _service.enable_retries() + self.test_create_example_required_params() + + # Disable retries and run test_create_example_required_params. + _service.disable_retries() + self.test_create_example_required_params() + + @responses.activate + def test_create_example_value_error(self): + """ + test_create_example_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples') + mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + text = 'testString' + mentions = [mention_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "intent": intent, + "text": text, } - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - values = service.list_values( - workspace_id='boguswid', - entity='grilling', - export=True).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert values == response - # Verify that response can be converted to a ValueCollection - ValueCollection._from_dict(values) - - -@responses.activate -def test_update_value(): - endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format( - 'boguswid', 'grilling', 'bbq') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "value": "BBQ sauce", - "metadata": { - "code": 1422 - }, - "type": "synonyms", - "created": "2015-12-06T23:53:59.153Z", - "updated": "2015-12-06T23:53:59.153Z" - } - responses.add( - responses.POST, - url, - 
body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-04-21') - value = service.update_value( - workspace_id='boguswid', - entity='grilling', - value='bbq', - new_value='BBQ sauce', - new_metadata={"code": 1422}, - new_synonyms=None).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert value == response - # Verify that response can be converted to a Value - Value._from_dict(value) - - -######################### -# workspaces -######################### - - -@responses.activate -def test_create_workspace(): - endpoint = '/v1/workspaces' - url = '{0}{1}'.format(base_url, endpoint) - response = { - "name": "Pizza app", - "created": "2015-12-06T23:53:59.153Z", - "language": "en", - "metadata": {}, - "updated": "2015-12-06T23:53:59.153Z", - "description": "Pizza app", - "workspace_id": "pizza_app-e0f3", - "learning_opt_out": True - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=201, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - workspace = service.create_workspace( - name='Pizza app', description='Pizza app', language='en', metadata={}, - system_settings={'tooling': {'store_generic_responses' : True, 'disambiguation': {'prompt': 'Hello world', 'enabled': True}}}).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert workspace == response - # Verify that response can be converted to a Workspace - Workspace._from_dict(workspace) - -@responses.activate -def test_delete_workspace(): - endpoint = '/v1/workspaces/{0}'.format('boguswid') - url = '{0}{1}'.format(base_url, endpoint) - response = {} - responses.add( - responses.DELETE, - url, - body=json.dumps(response), - status=204, - content_type='') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - workspace = service.delete_workspace(workspace_id='boguswid').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert workspace is None - - -@responses.activate -def test_get_workspace(): - endpoint = '/v1/workspaces/{0}'.format('boguswid') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "name": "Pizza app", - "created": "2015-12-06T23:53:59.153Z", - "language": "en", - "metadata": {}, - "updated": "2015-12-06T23:53:59.153Z", - "description": "Pizza app", - "status": "Available", - "learning_opt_out": False, - "workspace_id": "pizza_app-e0f3" - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - workspace = service.get_workspace(workspace_id='boguswid', export=True, sort='stable').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert workspace == response - # Verify that response can be converted to a Workspace - Workspace._from_dict(workspace) - - -@responses.activate -def test_list_workspaces(): - endpoint = '/v1/workspaces' - url = '{0}{1}'.format(base_url, endpoint) - response = { - "workspaces": [{ - "name": "Pizza app", - "created": "2015-12-06T23:53:59.153Z", - "language": "en", - "metadata": {}, - "updated": "2015-12-06T23:53:59.153Z", - 
"description": "Pizza app", - "workspace_id": "pizza_app-e0f3", - "learning_opt_out": True - }], - "pagination": { - "refresh_url": - "/v1/workspaces?version=2016-01-24&page_limit=1", - "next_url": - "/v1/workspaces?cursor=base64=&version=2016-01-24&page_limit=1" + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_example(**req_copy) + + def test_create_example_value_error_with_retries(self): + # Enable retries and run test_create_example_value_error. + _service.enable_retries() + self.test_create_example_value_error() + + # Disable retries and run test_create_example_value_error. + _service.disable_retries() + self.test_create_example_value_error() + + +class TestGetExample: + """ + Test Class for get_example + """ + + @responses.activate + def test_get_example_all_params(self): + """ + get_example() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString') + mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + text = 'testString' + include_audit = False + + # Invoke method + response = _service.get_example( + workspace_id, + intent, + text, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_get_example_all_params_with_retries(self): + # Enable retries and run test_get_example_all_params. + _service.enable_retries() + self.test_get_example_all_params() + + # Disable retries and run test_get_example_all_params. + _service.disable_retries() + self.test_get_example_all_params() + + @responses.activate + def test_get_example_required_params(self): + """ + test_get_example_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString') + mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + text = 'testString' + + # Invoke method + response = _service.get_example( + workspace_id, + intent, + text, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_example_required_params_with_retries(self): + # Enable retries and run test_get_example_required_params. + _service.enable_retries() + self.test_get_example_required_params() + + # Disable retries and run test_get_example_required_params. 
+ _service.disable_retries() + self.test_get_example_required_params() + + @responses.activate + def test_get_example_value_error(self): + """ + test_get_example_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString') + mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + text = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "intent": intent, + "text": text, } - } - responses.add( - responses.GET, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - workspaces = service.list_workspaces().get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert workspaces == response - # Verify that response can be converted to a WorkspaceCollection - WorkspaceCollection._from_dict(workspaces) - - -@responses.activate -def test_update_workspace(): - endpoint = '/v1/workspaces/{0}'.format('pizza_app-e0f3') - url = '{0}{1}'.format(base_url, endpoint) - response = { - "name": "Pizza app", - "created": "2015-12-06T23:53:59.153Z", - "language": "en", - "metadata": {}, - "updated": "2015-12-06T23:53:59.153Z", - "description": "Pizza app", - "workspace_id": "pizza_app-e0f3", - "learning_opt_out": True - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV1( - username='username', password='password', version='2017-02-03') - workspace = service.update_workspace( - workspace_id='pizza_app-e0f3', - name='Pizza app', - description='Pizza app', - language='en', - metadata={}, - system_settings={'tooling': {'store_generic_responses' : True, 'disambiguation': {'prompt': 'Hello world', 'enabled': True}}}).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert workspace == response - # Verify that response can be converted to a Workspace - Workspace._from_dict(workspace) - -@responses.activate -def test_dialog_nodes(): - url = 'https://gateway.watsonplatform.net/assistant/api/v1/workspaces/id/dialog_nodes' - responses.add( - responses.GET, - url, - body='{ "application/json": { "dialog_node": "location-atm" }}', - status=200, - content_type='application/json') - - responses.add( - responses.POST, - "{0}?version=2017-05-26".format(url), - body='{ "application/json": { "dialog_node": "location-done" }}', - status=200, - content_type='application/json') - - responses.add( - responses.DELETE, - "{0}/location-done?version=2017-05-26".format(url), - body='{"description": "deleted successfully"}', - status=200, - content_type='application/json') - - responses.add( - responses.GET, - "{0}/location-done?version=2017-05-26".format(url), - body='{ "application/json": { "dialog_node": "location-atm" }}', - status=200, - content_type='application/json') - - assistant = ibm_watson.AssistantV1('2017-05-26', username="username", password="password") - - assistant.create_dialog_node('id', 'location-done', user_label='xxx') - 
assert responses.calls[0].response.json()['application/json']['dialog_node'] == 'location-done' - - assistant.delete_dialog_node('id', 'location-done') - assert responses.calls[1].response.json() == {"description": "deleted successfully"} - - assistant.get_dialog_node('id', 'location-done') - assert responses.calls[2].response.json() == {"application/json": {"dialog_node": "location-atm"}} - - assistant.list_dialog_nodes('id') - assert responses.calls[3].response.json() == {"application/json": {"dialog_node": "location-atm"}} - - assert len(responses.calls) == 4 - -@responses.activate -def test_delete_user_data(): - url = 'https://gateway.watsonplatform.net/assistant/api/v1/user_data' - responses.add( - responses.DELETE, - url, - body=None, - status=204, - content_type='application_json') - - assistant = ibm_watson.AssistantV1('2017-05-26', username="username", password="password") - - response = assistant.delete_user_data('id').get_result() - assert response is None - assert len(responses.calls) == 1 - -@responses.activate -def test_list_mentions(): - url = 'https://gateway.watsonplatform.net/assistant/api/v1/workspaces/workspace_id/entities/entity1/mentions' - responses.add( - responses.GET, - url, - body='[{"entity": "xxx"}]', - status=200, - content_type='application_json') - - assistant = ibm_watson.AssistantV1('2017-05-26', username="username", password="password") - - response = assistant.list_mentions('workspace_id', 'entity1').get_result() - assert response == [{"entity": "xxx"}] - assert len(responses.calls) == 1 + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_example(**req_copy) + + def test_get_example_value_error_with_retries(self): + # Enable retries and run test_get_example_value_error. + _service.enable_retries() + self.test_get_example_value_error() + + # Disable retries and run test_get_example_value_error. 
+ _service.disable_retries() + self.test_get_example_value_error() + + +class TestUpdateExample: + """ + Test Class for update_example + """ + + @responses.activate + def test_update_example_all_params(self): + """ + update_example() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString') + mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + text = 'testString' + new_text = 'testString' + new_mentions = [mention_model] + include_audit = False + + # Invoke method + response = _service.update_example( + workspace_id, + intent, + text, + new_text=new_text, + new_mentions=new_mentions, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['text'] == 'testString' + assert req_body['mentions'] == [mention_model] + + def test_update_example_all_params_with_retries(self): + # Enable retries and run test_update_example_all_params. + _service.enable_retries() + self.test_update_example_all_params() + + # Disable retries and run test_update_example_all_params. + _service.disable_retries() + self.test_update_example_all_params() + + @responses.activate + def test_update_example_required_params(self): + """ + test_update_example_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString') + mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + text = 'testString' + new_text = 'testString' + new_mentions = [mention_model] + + # Invoke method + response = _service.update_example( + workspace_id, + intent, + text, + new_text=new_text, + new_mentions=new_mentions, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['text'] == 'testString' + assert req_body['mentions'] == [mention_model] + + def test_update_example_required_params_with_retries(self): + # Enable retries and run test_update_example_required_params. 
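# The update_example tests above send new_text/new_mentions and verify the body
# keys 'text' and 'mentions'. A sketch of renaming an example in place, with
# placeholder client setup and identifiers:
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

assistant = AssistantV1(version='2021-06-14',
                        authenticator=IAMAuthenticator('{apikey}'))
assistant.set_service_url('{service_url}')

updated = assistant.update_example(
    '{workspace_id}', 'turn_on', 'please turn on the radio',
    new_text='could you turn the radio on',
).get_result()
print(updated['text'])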
+ _service.enable_retries() + self.test_update_example_required_params() + + # Disable retries and run test_update_example_required_params. + _service.disable_retries() + self.test_update_example_required_params() + + @responses.activate + def test_update_example_value_error(self): + """ + test_update_example_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString') + mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a Mention model + mention_model = {} + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + text = 'testString' + new_text = 'testString' + new_mentions = [mention_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "intent": intent, + "text": text, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_example(**req_copy) + + def test_update_example_value_error_with_retries(self): + # Enable retries and run test_update_example_value_error. + _service.enable_retries() + self.test_update_example_value_error() + + # Disable retries and run test_update_example_value_error. + _service.disable_retries() + self.test_update_example_value_error() + + +class TestDeleteExample: + """ + Test Class for delete_example + """ + + @responses.activate + def test_delete_example_all_params(self): + """ + delete_example() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + text = 'testString' + + # Invoke method + response = _service.delete_example( + workspace_id, + intent, + text, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_example_all_params_with_retries(self): + # Enable retries and run test_delete_example_all_params. + _service.enable_retries() + self.test_delete_example_all_params() + + # Disable retries and run test_delete_example_all_params. 
+ _service.disable_retries() + self.test_delete_example_all_params() + + @responses.activate + def test_delete_example_value_error(self): + """ + test_delete_example_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + intent = 'testString' + text = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "intent": intent, + "text": text, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_example(**req_copy) + + def test_delete_example_value_error_with_retries(self): + # Enable retries and run test_delete_example_value_error. + _service.enable_retries() + self.test_delete_example_value_error() + + # Disable retries and run test_delete_example_value_error. + _service.disable_retries() + self.test_delete_example_value_error() + + +# endregion +############################################################################## +# End of Service: Examples +############################################################################## + +############################################################################## +# Start of Service: Counterexamples +############################################################################## +# region + + +class TestListCounterexamples: + """ + Test Class for list_counterexamples + """ + + @responses.activate + def test_list_counterexamples_all_params(self): + """ + list_counterexamples() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples') + mock_response = '{"counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + page_limit = 100 + include_count = False + sort = 'text' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_counterexamples( + workspace_id, + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_counterexamples_all_params_with_retries(self): + # Enable retries and run test_list_counterexamples_all_params. + _service.enable_retries() + self.test_list_counterexamples_all_params() + + # Disable retries and run test_list_counterexamples_all_params. 
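An aside on the recurring value-error checks in this hunk: each one builds a dict of the operation's required arguments, nulls them out one at a time, and expects the client to raise ValueError before any HTTP call is made. The sketch below distills that loop with a hypothetical validating stub (run under pytest); note that the diff compares keys with `is not`, which happens to work because both loops iterate the same key objects, while `==` is the more conventional spelling.

```python
import pytest


def create_counterexample_stub(workspace_id, text):
    """Hypothetical stand-in for an SDK operation that validates its required args."""
    if workspace_id is None:
        raise ValueError('workspace_id must be provided')
    if text is None:
        raise ValueError('text must be provided')
    return {'workspace_id': workspace_id, 'text': text}


def test_missing_required_params_raise_value_error():
    # Required arguments for the (hypothetical) operation.
    req_param_dict = {
        'workspace_id': 'testString',
        'text': 'testString',
    }
    for param in req_param_dict.keys():
        # Null out exactly one required value per iteration.
        req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            create_counterexample_stub(**req_copy)
```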
+ _service.disable_retries() + self.test_list_counterexamples_all_params() + + @responses.activate + def test_list_counterexamples_required_params(self): + """ + test_list_counterexamples_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples') + mock_response = '{"counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.list_counterexamples( + workspace_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_counterexamples_required_params_with_retries(self): + # Enable retries and run test_list_counterexamples_required_params. + _service.enable_retries() + self.test_list_counterexamples_required_params() + + # Disable retries and run test_list_counterexamples_required_params. + _service.disable_retries() + self.test_list_counterexamples_required_params() + + @responses.activate + def test_list_counterexamples_value_error(self): + """ + test_list_counterexamples_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples') + mock_response = '{"counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_counterexamples(**req_copy) + + def test_list_counterexamples_value_error_with_retries(self): + # Enable retries and run test_list_counterexamples_value_error. + _service.enable_retries() + self.test_list_counterexamples_value_error() + + # Disable retries and run test_list_counterexamples_value_error. 
+ _service.disable_retries() + self.test_list_counterexamples_value_error() + + +class TestCreateCounterexample: + """ + Test Class for create_counterexample + """ + + @responses.activate + def test_create_counterexample_all_params(self): + """ + create_counterexample() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples') + mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + workspace_id = 'testString' + text = 'testString' + include_audit = False + + # Invoke method + response = _service.create_counterexample( + workspace_id, + text, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['text'] == 'testString' + + def test_create_counterexample_all_params_with_retries(self): + # Enable retries and run test_create_counterexample_all_params. + _service.enable_retries() + self.test_create_counterexample_all_params() + + # Disable retries and run test_create_counterexample_all_params. + _service.disable_retries() + self.test_create_counterexample_all_params() + + @responses.activate + def test_create_counterexample_required_params(self): + """ + test_create_counterexample_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples') + mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + workspace_id = 'testString' + text = 'testString' + + # Invoke method + response = _service.create_counterexample( + workspace_id, + text, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['text'] == 'testString' + + def test_create_counterexample_required_params_with_retries(self): + # Enable retries and run test_create_counterexample_required_params. + _service.enable_retries() + self.test_create_counterexample_required_params() + + # Disable retries and run test_create_counterexample_required_params. 
+ _service.disable_retries() + self.test_create_counterexample_required_params() + + @responses.activate + def test_create_counterexample_value_error(self): + """ + test_create_counterexample_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples') + mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + workspace_id = 'testString' + text = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "text": text, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_counterexample(**req_copy) + + def test_create_counterexample_value_error_with_retries(self): + # Enable retries and run test_create_counterexample_value_error. + _service.enable_retries() + self.test_create_counterexample_value_error() + + # Disable retries and run test_create_counterexample_value_error. + _service.disable_retries() + self.test_create_counterexample_value_error() + + +class TestGetCounterexample: + """ + Test Class for get_counterexample + """ + + @responses.activate + def test_get_counterexample_all_params(self): + """ + get_counterexample() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples/testString') + mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + text = 'testString' + include_audit = False + + # Invoke method + response = _service.get_counterexample( + workspace_id, + text, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_get_counterexample_all_params_with_retries(self): + # Enable retries and run test_get_counterexample_all_params. + _service.enable_retries() + self.test_get_counterexample_all_params() + + # Disable retries and run test_get_counterexample_all_params. 
+ _service.disable_retries() + self.test_get_counterexample_all_params() + + @responses.activate + def test_get_counterexample_required_params(self): + """ + test_get_counterexample_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples/testString') + mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + text = 'testString' + + # Invoke method + response = _service.get_counterexample( + workspace_id, + text, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_counterexample_required_params_with_retries(self): + # Enable retries and run test_get_counterexample_required_params. + _service.enable_retries() + self.test_get_counterexample_required_params() + + # Disable retries and run test_get_counterexample_required_params. + _service.disable_retries() + self.test_get_counterexample_required_params() + + @responses.activate + def test_get_counterexample_value_error(self): + """ + test_get_counterexample_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples/testString') + mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + text = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "text": text, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_counterexample(**req_copy) + + def test_get_counterexample_value_error_with_retries(self): + # Enable retries and run test_get_counterexample_value_error. + _service.enable_retries() + self.test_get_counterexample_value_error() + + # Disable retries and run test_get_counterexample_value_error. 
+ _service.disable_retries() + self.test_get_counterexample_value_error() + + +class TestUpdateCounterexample: + """ + Test Class for update_counterexample + """ + + @responses.activate + def test_update_counterexample_all_params(self): + """ + update_counterexample() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples/testString') + mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + text = 'testString' + new_text = 'testString' + include_audit = False + + # Invoke method + response = _service.update_counterexample( + workspace_id, + text, + new_text=new_text, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['text'] == 'testString' + + def test_update_counterexample_all_params_with_retries(self): + # Enable retries and run test_update_counterexample_all_params. + _service.enable_retries() + self.test_update_counterexample_all_params() + + # Disable retries and run test_update_counterexample_all_params. + _service.disable_retries() + self.test_update_counterexample_all_params() + + @responses.activate + def test_update_counterexample_required_params(self): + """ + test_update_counterexample_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples/testString') + mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + text = 'testString' + new_text = 'testString' + + # Invoke method + response = _service.update_counterexample( + workspace_id, + text, + new_text=new_text, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['text'] == 'testString' + + def test_update_counterexample_required_params_with_retries(self): + # Enable retries and run test_update_counterexample_required_params. + _service.enable_retries() + self.test_update_counterexample_required_params() + + # Disable retries and run test_update_counterexample_required_params. 
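The "Validate body params" steps above lean on the responses library recording the outgoing request: its body comes back as raw bytes, so the tests decode it with str(body, 'utf-8') and json.loads before asserting on individual fields. A minimal standalone illustration of the same check, using plain requests against a made-up URL instead of the SDK client:

```python
import json

import requests
import responses


@responses.activate
def test_request_body_is_recorded_and_decodable():
    url = 'https://example.test/v1/workspaces/ws/counterexamples/oldtext'
    responses.add(responses.POST, url, body='{}', content_type='application/json', status=200)

    # Send an explicit UTF-8 body so request.body is bytes, as it is for the SDK client.
    requests.post(
        url,
        data=json.dumps({'text': 'testString'}).encode('utf-8'),
        headers={'Content-Type': 'application/json'},
    )

    # responses keeps the outgoing request; decode the raw bytes, then parse the JSON.
    req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
    assert req_body['text'] == 'testString'
```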
+ _service.disable_retries() + self.test_update_counterexample_required_params() + + @responses.activate + def test_update_counterexample_value_error(self): + """ + test_update_counterexample_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples/testString') + mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + text = 'testString' + new_text = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "text": text, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_counterexample(**req_copy) + + def test_update_counterexample_value_error_with_retries(self): + # Enable retries and run test_update_counterexample_value_error. + _service.enable_retries() + self.test_update_counterexample_value_error() + + # Disable retries and run test_update_counterexample_value_error. + _service.disable_retries() + self.test_update_counterexample_value_error() + + +class TestDeleteCounterexample: + """ + Test Class for delete_counterexample + """ + + @responses.activate + def test_delete_counterexample_all_params(self): + """ + delete_counterexample() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + text = 'testString' + + # Invoke method + response = _service.delete_counterexample( + workspace_id, + text, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_counterexample_all_params_with_retries(self): + # Enable retries and run test_delete_counterexample_all_params. + _service.enable_retries() + self.test_delete_counterexample_all_params() + + # Disable retries and run test_delete_counterexample_all_params. + _service.disable_retries() + self.test_delete_counterexample_all_params() + + @responses.activate + def test_delete_counterexample_value_error(self): + """ + test_delete_counterexample_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/counterexamples/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + text = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "text": text, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_counterexample(**req_copy) + + def test_delete_counterexample_value_error_with_retries(self): + # Enable retries and run test_delete_counterexample_value_error. + _service.enable_retries() + self.test_delete_counterexample_value_error() + + # Disable retries and run test_delete_counterexample_value_error. 
+ _service.disable_retries() + self.test_delete_counterexample_value_error() + + +# endregion +############################################################################## +# End of Service: Counterexamples +############################################################################## + +############################################################################## +# Start of Service: Entities +############################################################################## +# region + + +class TestListEntities: + """ + Test Class for list_entities + """ + + @responses.activate + def test_list_entities_all_params(self): + """ + list_entities() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities') + mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + export = False + page_limit = 100 + include_count = False + sort = 'entity' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_entities( + workspace_id, + export=export, + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'export={}'.format('true' if export else 'false') in query_string + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_entities_all_params_with_retries(self): + # Enable retries and run test_list_entities_all_params. + _service.enable_retries() + self.test_list_entities_all_params() + + # Disable retries and run test_list_entities_all_params. 
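The query-string assertions in these tests split the request URL at '?', run it through urllib.parse.unquote_plus, and expect Python booleans such as include_audit and include_count to appear as lowercase 'true'/'false'. A small sketch of the same check, with the boolean converted by hand since plain requests stands in for the SDK here (the assertions in the diff imply the SDK does the equivalent conversion itself):

```python
import urllib.parse

import requests
import responses


@responses.activate
def test_boolean_query_params_appear_as_lowercase_strings():
    url = 'https://example.test/v1/workspaces/ws/entities'
    responses.add(responses.GET, url, body='{"entities": []}', content_type='application/json', status=200)

    include_audit = False
    # Convert the bool explicitly; requests alone would send 'False'.
    requests.get(url, params={
        'include_audit': 'true' if include_audit else 'false',
        'page_limit': 100,
    })

    query_string = responses.calls[0].request.url.split('?', 1)[1]
    query_string = urllib.parse.unquote_plus(query_string)
    assert 'include_audit=false' in query_string
    assert 'page_limit=100' in query_string
```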
+ _service.disable_retries() + self.test_list_entities_all_params() + + @responses.activate + def test_list_entities_required_params(self): + """ + test_list_entities_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities') + mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.list_entities( + workspace_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_entities_required_params_with_retries(self): + # Enable retries and run test_list_entities_required_params. + _service.enable_retries() + self.test_list_entities_required_params() + + # Disable retries and run test_list_entities_required_params. + _service.disable_retries() + self.test_list_entities_required_params() + + @responses.activate + def test_list_entities_value_error(self): + """ + test_list_entities_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities') + mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_entities(**req_copy) + + def test_list_entities_value_error_with_retries(self): + # Enable retries and run test_list_entities_value_error. + _service.enable_retries() + self.test_list_entities_value_error() + + # Disable retries and run test_list_entities_value_error. 
+ _service.disable_retries() + self.test_list_entities_value_error() + + +class TestCreateEntity: + """ + Test Class for create_entity + """ + + @responses.activate + def test_create_entity_all_params(self): + """ + create_entity() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities') + mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a CreateValue model + create_value_model = {} + create_value_model['value'] = 'testString' + create_value_model['metadata'] = {'anyKey': 'anyValue'} + create_value_model['type'] = 'synonyms' + create_value_model['synonyms'] = ['testString'] + create_value_model['patterns'] = ['testString'] + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + description = 'testString' + metadata = {'anyKey': 'anyValue'} + fuzzy_match = True + values = [create_value_model] + include_audit = False + + # Invoke method + response = _service.create_entity( + workspace_id, + entity, + description=description, + metadata=metadata, + fuzzy_match=fuzzy_match, + values=values, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['entity'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['fuzzy_match'] == True + assert req_body['values'] == [create_value_model] + + def test_create_entity_all_params_with_retries(self): + # Enable retries and run test_create_entity_all_params. + _service.enable_retries() + self.test_create_entity_all_params() + + # Disable retries and run test_create_entity_all_params. 
+ _service.disable_retries() + self.test_create_entity_all_params() + + @responses.activate + def test_create_entity_required_params(self): + """ + test_create_entity_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities') + mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a CreateValue model + create_value_model = {} + create_value_model['value'] = 'testString' + create_value_model['metadata'] = {'anyKey': 'anyValue'} + create_value_model['type'] = 'synonyms' + create_value_model['synonyms'] = ['testString'] + create_value_model['patterns'] = ['testString'] + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + description = 'testString' + metadata = {'anyKey': 'anyValue'} + fuzzy_match = True + values = [create_value_model] + + # Invoke method + response = _service.create_entity( + workspace_id, + entity, + description=description, + metadata=metadata, + fuzzy_match=fuzzy_match, + values=values, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['entity'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['fuzzy_match'] == True + assert req_body['values'] == [create_value_model] + + def test_create_entity_required_params_with_retries(self): + # Enable retries and run test_create_entity_required_params. + _service.enable_retries() + self.test_create_entity_required_params() + + # Disable retries and run test_create_entity_required_params. 
+ _service.disable_retries() + self.test_create_entity_required_params() + + @responses.activate + def test_create_entity_value_error(self): + """ + test_create_entity_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities') + mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a CreateValue model + create_value_model = {} + create_value_model['value'] = 'testString' + create_value_model['metadata'] = {'anyKey': 'anyValue'} + create_value_model['type'] = 'synonyms' + create_value_model['synonyms'] = ['testString'] + create_value_model['patterns'] = ['testString'] + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + description = 'testString' + metadata = {'anyKey': 'anyValue'} + fuzzy_match = True + values = [create_value_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_entity(**req_copy) + + def test_create_entity_value_error_with_retries(self): + # Enable retries and run test_create_entity_value_error. + _service.enable_retries() + self.test_create_entity_value_error() + + # Disable retries and run test_create_entity_value_error. 
+ _service.disable_retries() + self.test_create_entity_value_error() + + +class TestGetEntity: + """ + Test Class for get_entity + """ + + @responses.activate + def test_get_entity_all_params(self): + """ + get_entity() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString') + mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + export = False + include_audit = False + + # Invoke method + response = _service.get_entity( + workspace_id, + entity, + export=export, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'export={}'.format('true' if export else 'false') in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_get_entity_all_params_with_retries(self): + # Enable retries and run test_get_entity_all_params. + _service.enable_retries() + self.test_get_entity_all_params() + + # Disable retries and run test_get_entity_all_params. + _service.disable_retries() + self.test_get_entity_all_params() + + @responses.activate + def test_get_entity_required_params(self): + """ + test_get_entity_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString') + mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + + # Invoke method + response = _service.get_entity( + workspace_id, + entity, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_entity_required_params_with_retries(self): + # Enable retries and run test_get_entity_required_params. + _service.enable_retries() + self.test_get_entity_required_params() + + # Disable retries and run test_get_entity_required_params. 
+ _service.disable_retries() + self.test_get_entity_required_params() + + @responses.activate + def test_get_entity_value_error(self): + """ + test_get_entity_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString') + mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_entity(**req_copy) + + def test_get_entity_value_error_with_retries(self): + # Enable retries and run test_get_entity_value_error. + _service.enable_retries() + self.test_get_entity_value_error() + + # Disable retries and run test_get_entity_value_error. + _service.disable_retries() + self.test_get_entity_value_error() + + +class TestUpdateEntity: + """ + Test Class for update_entity + """ + + @responses.activate + def test_update_entity_all_params(self): + """ + update_entity() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString') + mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a CreateValue model + create_value_model = {} + create_value_model['value'] = 'testString' + create_value_model['metadata'] = {'anyKey': 'anyValue'} + create_value_model['type'] = 'synonyms' + create_value_model['synonyms'] = ['testString'] + create_value_model['patterns'] = ['testString'] + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + new_entity = 'testString' + new_description = 'testString' + new_metadata = {'anyKey': 'anyValue'} + new_fuzzy_match = True + new_values = [create_value_model] + append = False + include_audit = False + + # Invoke method + response = _service.update_entity( + workspace_id, + entity, + new_entity=new_entity, + new_description=new_description, + new_metadata=new_metadata, + new_fuzzy_match=new_fuzzy_match, + new_values=new_values, + append=append, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'append={}'.format('true' if append 
else 'false') in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['entity'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['fuzzy_match'] == True + assert req_body['values'] == [create_value_model] + + def test_update_entity_all_params_with_retries(self): + # Enable retries and run test_update_entity_all_params. + _service.enable_retries() + self.test_update_entity_all_params() + + # Disable retries and run test_update_entity_all_params. + _service.disable_retries() + self.test_update_entity_all_params() + + @responses.activate + def test_update_entity_required_params(self): + """ + test_update_entity_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString') + mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a CreateValue model + create_value_model = {} + create_value_model['value'] = 'testString' + create_value_model['metadata'] = {'anyKey': 'anyValue'} + create_value_model['type'] = 'synonyms' + create_value_model['synonyms'] = ['testString'] + create_value_model['patterns'] = ['testString'] + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + new_entity = 'testString' + new_description = 'testString' + new_metadata = {'anyKey': 'anyValue'} + new_fuzzy_match = True + new_values = [create_value_model] + + # Invoke method + response = _service.update_entity( + workspace_id, + entity, + new_entity=new_entity, + new_description=new_description, + new_metadata=new_metadata, + new_fuzzy_match=new_fuzzy_match, + new_values=new_values, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['entity'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['fuzzy_match'] == True + assert req_body['values'] == [create_value_model] + + def test_update_entity_required_params_with_retries(self): + # Enable retries and run test_update_entity_required_params. + _service.enable_retries() + self.test_update_entity_required_params() + + # Disable retries and run test_update_entity_required_params. 
+ _service.disable_retries() + self.test_update_entity_required_params() + + @responses.activate + def test_update_entity_value_error(self): + """ + test_update_entity_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString') + mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a CreateValue model + create_value_model = {} + create_value_model['value'] = 'testString' + create_value_model['metadata'] = {'anyKey': 'anyValue'} + create_value_model['type'] = 'synonyms' + create_value_model['synonyms'] = ['testString'] + create_value_model['patterns'] = ['testString'] + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + new_entity = 'testString' + new_description = 'testString' + new_metadata = {'anyKey': 'anyValue'} + new_fuzzy_match = True + new_values = [create_value_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_entity(**req_copy) + + def test_update_entity_value_error_with_retries(self): + # Enable retries and run test_update_entity_value_error. + _service.enable_retries() + self.test_update_entity_value_error() + + # Disable retries and run test_update_entity_value_error. + _service.disable_retries() + self.test_update_entity_value_error() + + +class TestDeleteEntity: + """ + Test Class for delete_entity + """ + + @responses.activate + def test_delete_entity_all_params(self): + """ + delete_entity() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + + # Invoke method + response = _service.delete_entity( + workspace_id, + entity, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_entity_all_params_with_retries(self): + # Enable retries and run test_delete_entity_all_params. + _service.enable_retries() + self.test_delete_entity_all_params() + + # Disable retries and run test_delete_entity_all_params. 
+ _service.disable_retries() + self.test_delete_entity_all_params() + + @responses.activate + def test_delete_entity_value_error(self): + """ + test_delete_entity_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_entity(**req_copy) + + def test_delete_entity_value_error_with_retries(self): + # Enable retries and run test_delete_entity_value_error. + _service.enable_retries() + self.test_delete_entity_value_error() + + # Disable retries and run test_delete_entity_value_error. + _service.disable_retries() + self.test_delete_entity_value_error() + + +# endregion +############################################################################## +# End of Service: Entities +############################################################################## + +############################################################################## +# Start of Service: Mentions +############################################################################## +# region + + +class TestListMentions: + """ + Test Class for list_mentions + """ + + @responses.activate + def test_list_mentions_all_params(self): + """ + list_mentions() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/mentions') + mock_response = '{"examples": [{"text": "text", "intent": "intent", "location": [8]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + export = False + include_audit = False + + # Invoke method + response = _service.list_mentions( + workspace_id, + entity, + export=export, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'export={}'.format('true' if export else 'false') in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_mentions_all_params_with_retries(self): + # Enable retries and run test_list_mentions_all_params. + _service.enable_retries() + self.test_list_mentions_all_params() + + # Disable retries and run test_list_mentions_all_params. 
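Every operation also gets a *_with_retries variant that calls _service.enable_retries(), reruns the base test, then calls _service.disable_retries() and reruns it, so the same mocked exchange is exercised with and without the client's automatic-retry support. A self-contained sketch of that wrapper shape, using a fake client rather than the real AssistantV1 fixture defined earlier in the file:

```python
class FakeClient:
    """Minimal stand-in exposing the enable/disable hooks the wrappers rely on."""

    def __init__(self):
        self.retries_enabled = False

    def enable_retries(self):
        self.retries_enabled = True

    def disable_retries(self):
        self.retries_enabled = False


def run_with_and_without_retries(client, test_fn):
    # Mirror the *_with_retries wrappers: once with retries on, once with them off.
    client.enable_retries()
    test_fn()
    client.disable_retries()
    test_fn()


def test_wrapper_runs_body_twice():
    seen = []
    client = FakeClient()
    run_with_and_without_retries(client, lambda: seen.append(client.retries_enabled))
    assert seen == [True, False]
```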
+ _service.disable_retries() + self.test_list_mentions_all_params() + + @responses.activate + def test_list_mentions_required_params(self): + """ + test_list_mentions_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/mentions') + mock_response = '{"examples": [{"text": "text", "intent": "intent", "location": [8]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + + # Invoke method + response = _service.list_mentions( + workspace_id, + entity, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_mentions_required_params_with_retries(self): + # Enable retries and run test_list_mentions_required_params. + _service.enable_retries() + self.test_list_mentions_required_params() + + # Disable retries and run test_list_mentions_required_params. + _service.disable_retries() + self.test_list_mentions_required_params() + + @responses.activate + def test_list_mentions_value_error(self): + """ + test_list_mentions_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/mentions') + mock_response = '{"examples": [{"text": "text", "intent": "intent", "location": [8]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_mentions(**req_copy) + + def test_list_mentions_value_error_with_retries(self): + # Enable retries and run test_list_mentions_value_error. + _service.enable_retries() + self.test_list_mentions_value_error() + + # Disable retries and run test_list_mentions_value_error. 
+ _service.disable_retries() + self.test_list_mentions_value_error() + + +# endregion +############################################################################## +# End of Service: Mentions +############################################################################## + +############################################################################## +# Start of Service: Values +############################################################################## +# region + + +class TestListValues: + """ + Test Class for list_values + """ + + @responses.activate + def test_list_values_all_params(self): + """ + list_values() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values') + mock_response = '{"values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + export = False + page_limit = 100 + include_count = False + sort = 'value' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_values( + workspace_id, + entity, + export=export, + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'export={}'.format('true' if export else 'false') in query_string + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_values_all_params_with_retries(self): + # Enable retries and run test_list_values_all_params. + _service.enable_retries() + self.test_list_values_all_params() + + # Disable retries and run test_list_values_all_params. 
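The mocked list responses (list_values, list_entities, list_counterexamples, list_mentions) all carry a pagination object whose refresh_cursor/next_cursor fields are what a caller would feed back through the cursor parameter these list methods accept. A short sketch that parses one of the mock payloads and pulls those fields out:

```python
import json

# One of the mocked list_values payloads from this hunk, trimmed to the fields of interest.
mock_response = (
    '{"values": [{"value": "value", "type": "synonyms", "synonyms": ["synonym"]}], '
    '"pagination": {"refresh_url": "refresh_url", "next_url": "next_url", '
    '"total": 5, "matched": 7, '
    '"refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
)

page = json.loads(mock_response)
pagination = page['pagination']

# A present next_cursor signals another page; it would typically be passed back as
# cursor=... on the next list_values() call, together with the same page_limit.
next_cursor = pagination.get('next_cursor')
if next_cursor is not None:
    print('fetch the next page with cursor:', next_cursor)
print('values on this page:', [v['value'] for v in page['values']])
print('total / matched reported by the service:', pagination['total'], pagination['matched'])
```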
+ _service.disable_retries() + self.test_list_values_all_params() + + @responses.activate + def test_list_values_required_params(self): + """ + test_list_values_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values') + mock_response = '{"values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + + # Invoke method + response = _service.list_values( + workspace_id, + entity, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_values_required_params_with_retries(self): + # Enable retries and run test_list_values_required_params. + _service.enable_retries() + self.test_list_values_required_params() + + # Disable retries and run test_list_values_required_params. + _service.disable_retries() + self.test_list_values_required_params() + + @responses.activate + def test_list_values_value_error(self): + """ + test_list_values_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values') + mock_response = '{"values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_values(**req_copy) + + def test_list_values_value_error_with_retries(self): + # Enable retries and run test_list_values_value_error. + _service.enable_retries() + self.test_list_values_value_error() + + # Disable retries and run test_list_values_value_error. 
+ _service.disable_retries() + self.test_list_values_value_error() + + +class TestCreateValue: + """ + Test Class for create_value + """ + + @responses.activate + def test_create_value_all_params(self): + """ + create_value() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values') + mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + metadata = {'anyKey': 'anyValue'} + type = 'synonyms' + synonyms = ['testString'] + patterns = ['testString'] + include_audit = False + + # Invoke method + response = _service.create_value( + workspace_id, + entity, + value, + metadata=metadata, + type=type, + synonyms=synonyms, + patterns=patterns, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['value'] == 'testString' + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['type'] == 'synonyms' + assert req_body['synonyms'] == ['testString'] + assert req_body['patterns'] == ['testString'] + + def test_create_value_all_params_with_retries(self): + # Enable retries and run test_create_value_all_params. + _service.enable_retries() + self.test_create_value_all_params() + + # Disable retries and run test_create_value_all_params. 
+ _service.disable_retries() + self.test_create_value_all_params() + + @responses.activate + def test_create_value_required_params(self): + """ + test_create_value_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values') + mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + metadata = {'anyKey': 'anyValue'} + type = 'synonyms' + synonyms = ['testString'] + patterns = ['testString'] + + # Invoke method + response = _service.create_value( + workspace_id, + entity, + value, + metadata=metadata, + type=type, + synonyms=synonyms, + patterns=patterns, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['value'] == 'testString' + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['type'] == 'synonyms' + assert req_body['synonyms'] == ['testString'] + assert req_body['patterns'] == ['testString'] + + def test_create_value_required_params_with_retries(self): + # Enable retries and run test_create_value_required_params. + _service.enable_retries() + self.test_create_value_required_params() + + # Disable retries and run test_create_value_required_params. + _service.disable_retries() + self.test_create_value_required_params() + + @responses.activate + def test_create_value_value_error(self): + """ + test_create_value_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values') + mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + metadata = {'anyKey': 'anyValue'} + type = 'synonyms' + synonyms = ['testString'] + patterns = ['testString'] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + "value": value, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_value(**req_copy) + + def test_create_value_value_error_with_retries(self): + # Enable retries and run test_create_value_value_error. + _service.enable_retries() + self.test_create_value_value_error() + + # Disable retries and run test_create_value_value_error. 
+ _service.disable_retries() + self.test_create_value_value_error() + + +class TestGetValue: + """ + Test Class for get_value + """ + + @responses.activate + def test_get_value_all_params(self): + """ + get_value() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString') + mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + export = False + include_audit = False + + # Invoke method + response = _service.get_value( + workspace_id, + entity, + value, + export=export, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'export={}'.format('true' if export else 'false') in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_get_value_all_params_with_retries(self): + # Enable retries and run test_get_value_all_params. + _service.enable_retries() + self.test_get_value_all_params() + + # Disable retries and run test_get_value_all_params. + _service.disable_retries() + self.test_get_value_all_params() + + @responses.activate + def test_get_value_required_params(self): + """ + test_get_value_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString') + mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + + # Invoke method + response = _service.get_value( + workspace_id, + entity, + value, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_value_required_params_with_retries(self): + # Enable retries and run test_get_value_required_params. + _service.enable_retries() + self.test_get_value_required_params() + + # Disable retries and run test_get_value_required_params. 
+ _service.disable_retries() + self.test_get_value_required_params() + + @responses.activate + def test_get_value_value_error(self): + """ + test_get_value_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString') + mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + "value": value, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_value(**req_copy) + + def test_get_value_value_error_with_retries(self): + # Enable retries and run test_get_value_value_error. + _service.enable_retries() + self.test_get_value_value_error() + + # Disable retries and run test_get_value_value_error. + _service.disable_retries() + self.test_get_value_value_error() + + +class TestUpdateValue: + """ + Test Class for update_value + """ + + @responses.activate + def test_update_value_all_params(self): + """ + update_value() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString') + mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + new_value = 'testString' + new_metadata = {'anyKey': 'anyValue'} + new_type = 'synonyms' + new_synonyms = ['testString'] + new_patterns = ['testString'] + append = False + include_audit = False + + # Invoke method + response = _service.update_value( + workspace_id, + entity, + value, + new_value=new_value, + new_metadata=new_metadata, + new_type=new_type, + new_synonyms=new_synonyms, + new_patterns=new_patterns, + append=append, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'append={}'.format('true' if append else 'false') in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['value'] == 'testString' + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['type'] == 'synonyms' + assert req_body['synonyms'] == ['testString'] + assert req_body['patterns'] == ['testString'] + + def test_update_value_all_params_with_retries(self): + # Enable retries and run test_update_value_all_params. 
+ _service.enable_retries() + self.test_update_value_all_params() + + # Disable retries and run test_update_value_all_params. + _service.disable_retries() + self.test_update_value_all_params() + + @responses.activate + def test_update_value_required_params(self): + """ + test_update_value_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString') + mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + new_value = 'testString' + new_metadata = {'anyKey': 'anyValue'} + new_type = 'synonyms' + new_synonyms = ['testString'] + new_patterns = ['testString'] + + # Invoke method + response = _service.update_value( + workspace_id, + entity, + value, + new_value=new_value, + new_metadata=new_metadata, + new_type=new_type, + new_synonyms=new_synonyms, + new_patterns=new_patterns, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['value'] == 'testString' + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['type'] == 'synonyms' + assert req_body['synonyms'] == ['testString'] + assert req_body['patterns'] == ['testString'] + + def test_update_value_required_params_with_retries(self): + # Enable retries and run test_update_value_required_params. + _service.enable_retries() + self.test_update_value_required_params() + + # Disable retries and run test_update_value_required_params. + _service.disable_retries() + self.test_update_value_required_params() + + @responses.activate + def test_update_value_value_error(self): + """ + test_update_value_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString') + mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + new_value = 'testString' + new_metadata = {'anyKey': 'anyValue'} + new_type = 'synonyms' + new_synonyms = ['testString'] + new_patterns = ['testString'] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + "value": value, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_value(**req_copy) + + def test_update_value_value_error_with_retries(self): + # Enable retries and run test_update_value_value_error. + _service.enable_retries() + self.test_update_value_value_error() + + # Disable retries and run test_update_value_value_error. 
+ _service.disable_retries() + self.test_update_value_value_error() + + +class TestDeleteValue: + """ + Test Class for delete_value + """ + + @responses.activate + def test_delete_value_all_params(self): + """ + delete_value() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + + # Invoke method + response = _service.delete_value( + workspace_id, + entity, + value, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_value_all_params_with_retries(self): + # Enable retries and run test_delete_value_all_params. + _service.enable_retries() + self.test_delete_value_all_params() + + # Disable retries and run test_delete_value_all_params. + _service.disable_retries() + self.test_delete_value_all_params() + + @responses.activate + def test_delete_value_value_error(self): + """ + test_delete_value_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + "value": value, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_value(**req_copy) + + def test_delete_value_value_error_with_retries(self): + # Enable retries and run test_delete_value_value_error. + _service.enable_retries() + self.test_delete_value_value_error() + + # Disable retries and run test_delete_value_value_error. 
+ _service.disable_retries() + self.test_delete_value_value_error() + + +# endregion +############################################################################## +# End of Service: Values +############################################################################## + +############################################################################## +# Start of Service: Synonyms +############################################################################## +# region + + +class TestListSynonyms: + """ + Test Class for list_synonyms + """ + + @responses.activate + def test_list_synonyms_all_params(self): + """ + list_synonyms() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms') + mock_response = '{"synonyms": [{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + page_limit = 100 + include_count = False + sort = 'synonym' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_synonyms( + workspace_id, + entity, + value, + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_synonyms_all_params_with_retries(self): + # Enable retries and run test_list_synonyms_all_params. + _service.enable_retries() + self.test_list_synonyms_all_params() + + # Disable retries and run test_list_synonyms_all_params. 
+ _service.disable_retries() + self.test_list_synonyms_all_params() + + @responses.activate + def test_list_synonyms_required_params(self): + """ + test_list_synonyms_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms') + mock_response = '{"synonyms": [{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + + # Invoke method + response = _service.list_synonyms( + workspace_id, + entity, + value, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_synonyms_required_params_with_retries(self): + # Enable retries and run test_list_synonyms_required_params. + _service.enable_retries() + self.test_list_synonyms_required_params() + + # Disable retries and run test_list_synonyms_required_params. + _service.disable_retries() + self.test_list_synonyms_required_params() + + @responses.activate + def test_list_synonyms_value_error(self): + """ + test_list_synonyms_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms') + mock_response = '{"synonyms": [{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + "value": value, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_synonyms(**req_copy) + + def test_list_synonyms_value_error_with_retries(self): + # Enable retries and run test_list_synonyms_value_error. + _service.enable_retries() + self.test_list_synonyms_value_error() + + # Disable retries and run test_list_synonyms_value_error. 
+ _service.disable_retries() + self.test_list_synonyms_value_error() + + +class TestCreateSynonym: + """ + Test Class for create_synonym + """ + + @responses.activate + def test_create_synonym_all_params(self): + """ + create_synonym() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms') + mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + synonym = 'testString' + include_audit = False + + # Invoke method + response = _service.create_synonym( + workspace_id, + entity, + value, + synonym, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['synonym'] == 'testString' + + def test_create_synonym_all_params_with_retries(self): + # Enable retries and run test_create_synonym_all_params. + _service.enable_retries() + self.test_create_synonym_all_params() + + # Disable retries and run test_create_synonym_all_params. + _service.disable_retries() + self.test_create_synonym_all_params() + + @responses.activate + def test_create_synonym_required_params(self): + """ + test_create_synonym_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms') + mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + synonym = 'testString' + + # Invoke method + response = _service.create_synonym( + workspace_id, + entity, + value, + synonym, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['synonym'] == 'testString' + + def test_create_synonym_required_params_with_retries(self): + # Enable retries and run test_create_synonym_required_params. + _service.enable_retries() + self.test_create_synonym_required_params() + + # Disable retries and run test_create_synonym_required_params. 
+ _service.disable_retries() + self.test_create_synonym_required_params() + + @responses.activate + def test_create_synonym_value_error(self): + """ + test_create_synonym_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms') + mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + synonym = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + "value": value, + "synonym": synonym, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_synonym(**req_copy) + + def test_create_synonym_value_error_with_retries(self): + # Enable retries and run test_create_synonym_value_error. + _service.enable_retries() + self.test_create_synonym_value_error() + + # Disable retries and run test_create_synonym_value_error. + _service.disable_retries() + self.test_create_synonym_value_error() + + +class TestGetSynonym: + """ + Test Class for get_synonym + """ + + @responses.activate + def test_get_synonym_all_params(self): + """ + get_synonym() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString') + mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + synonym = 'testString' + include_audit = False + + # Invoke method + response = _service.get_synonym( + workspace_id, + entity, + value, + synonym, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_get_synonym_all_params_with_retries(self): + # Enable retries and run test_get_synonym_all_params. + _service.enable_retries() + self.test_get_synonym_all_params() + + # Disable retries and run test_get_synonym_all_params. 
+ _service.disable_retries() + self.test_get_synonym_all_params() + + @responses.activate + def test_get_synonym_required_params(self): + """ + test_get_synonym_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString') + mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + synonym = 'testString' + + # Invoke method + response = _service.get_synonym( + workspace_id, + entity, + value, + synonym, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_synonym_required_params_with_retries(self): + # Enable retries and run test_get_synonym_required_params. + _service.enable_retries() + self.test_get_synonym_required_params() + + # Disable retries and run test_get_synonym_required_params. + _service.disable_retries() + self.test_get_synonym_required_params() + + @responses.activate + def test_get_synonym_value_error(self): + """ + test_get_synonym_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString') + mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + synonym = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + "value": value, + "synonym": synonym, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_synonym(**req_copy) + + def test_get_synonym_value_error_with_retries(self): + # Enable retries and run test_get_synonym_value_error. + _service.enable_retries() + self.test_get_synonym_value_error() + + # Disable retries and run test_get_synonym_value_error. 
+ _service.disable_retries() + self.test_get_synonym_value_error() + + +class TestUpdateSynonym: + """ + Test Class for update_synonym + """ + + @responses.activate + def test_update_synonym_all_params(self): + """ + update_synonym() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString') + mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + synonym = 'testString' + new_synonym = 'testString' + include_audit = False + + # Invoke method + response = _service.update_synonym( + workspace_id, + entity, + value, + synonym, + new_synonym=new_synonym, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['synonym'] == 'testString' + + def test_update_synonym_all_params_with_retries(self): + # Enable retries and run test_update_synonym_all_params. + _service.enable_retries() + self.test_update_synonym_all_params() + + # Disable retries and run test_update_synonym_all_params. + _service.disable_retries() + self.test_update_synonym_all_params() + + @responses.activate + def test_update_synonym_required_params(self): + """ + test_update_synonym_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString') + mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + synonym = 'testString' + new_synonym = 'testString' + + # Invoke method + response = _service.update_synonym( + workspace_id, + entity, + value, + synonym, + new_synonym=new_synonym, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['synonym'] == 'testString' + + def test_update_synonym_required_params_with_retries(self): + # Enable retries and run test_update_synonym_required_params. + _service.enable_retries() + self.test_update_synonym_required_params() + + # Disable retries and run test_update_synonym_required_params. 
+ _service.disable_retries() + self.test_update_synonym_required_params() + + @responses.activate + def test_update_synonym_value_error(self): + """ + test_update_synonym_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString') + mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + synonym = 'testString' + new_synonym = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + "value": value, + "synonym": synonym, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_synonym(**req_copy) + + def test_update_synonym_value_error_with_retries(self): + # Enable retries and run test_update_synonym_value_error. + _service.enable_retries() + self.test_update_synonym_value_error() + + # Disable retries and run test_update_synonym_value_error. + _service.disable_retries() + self.test_update_synonym_value_error() + + +class TestDeleteSynonym: + """ + Test Class for delete_synonym + """ + + @responses.activate + def test_delete_synonym_all_params(self): + """ + delete_synonym() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + synonym = 'testString' + + # Invoke method + response = _service.delete_synonym( + workspace_id, + entity, + value, + synonym, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_synonym_all_params_with_retries(self): + # Enable retries and run test_delete_synonym_all_params. + _service.enable_retries() + self.test_delete_synonym_all_params() + + # Disable retries and run test_delete_synonym_all_params. + _service.disable_retries() + self.test_delete_synonym_all_params() + + @responses.activate + def test_delete_synonym_value_error(self): + """ + test_delete_synonym_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + entity = 'testString' + value = 'testString' + synonym = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "entity": entity, + "value": value, + "synonym": synonym, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_synonym(**req_copy) + + def test_delete_synonym_value_error_with_retries(self): + # Enable retries and run test_delete_synonym_value_error. + _service.enable_retries() + self.test_delete_synonym_value_error() + + # Disable retries and run test_delete_synonym_value_error. 
+ _service.disable_retries() + self.test_delete_synonym_value_error() + + +# endregion +############################################################################## +# End of Service: Synonyms +############################################################################## + +############################################################################## +# Start of Service: DialogNodes +############################################################################## +# region + + +class TestListDialogNodes: + """ + Test Class for list_dialog_nodes + """ + + @responses.activate + def test_list_dialog_nodes_all_params(self): + """ + list_dialog_nodes() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes') + mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + page_limit = 100 + include_count = False + sort = 'dialog_node' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_dialog_nodes( + workspace_id, + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_dialog_nodes_all_params_with_retries(self): + # Enable retries and run test_list_dialog_nodes_all_params. + _service.enable_retries() + self.test_list_dialog_nodes_all_params() + + # Disable retries and run test_list_dialog_nodes_all_params. 
+ _service.disable_retries() + self.test_list_dialog_nodes_all_params() + + @responses.activate + def test_list_dialog_nodes_required_params(self): + """ + test_list_dialog_nodes_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes') + mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.list_dialog_nodes( + workspace_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_dialog_nodes_required_params_with_retries(self): + # Enable retries and run test_list_dialog_nodes_required_params. + _service.enable_retries() + self.test_list_dialog_nodes_required_params() + + # Disable retries and run test_list_dialog_nodes_required_params. 
+ _service.disable_retries() + self.test_list_dialog_nodes_required_params() + + @responses.activate + def test_list_dialog_nodes_value_error(self): + """ + test_list_dialog_nodes_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes') + mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_dialog_nodes(**req_copy) + + def test_list_dialog_nodes_value_error_with_retries(self): + # Enable retries and run test_list_dialog_nodes_value_error. + _service.enable_retries() + self.test_list_dialog_nodes_value_error() + + # Disable retries and run test_list_dialog_nodes_value_error. 
+ _service.disable_retries() + self.test_list_dialog_nodes_value_error() + + +class TestCreateDialogNode: + """ + Test Class for create_dialog_node + """ + + @responses.activate + def test_create_dialog_node_all_params(self): + """ + create_dialog_node() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes') + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a DialogNodeOutputTextValuesElement model + dialog_node_output_text_values_element_model = {} + dialog_node_output_text_values_element_model['text'] = 'testString' + + # Construct a dict representation of a ResponseGenericChannel model + response_generic_channel_model = {} + response_generic_channel_model['channel'] = 'chat' + + # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model + dialog_node_output_generic_model = {} + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + # Construct a dict representation of a DialogNodeOutputModifiers model + dialog_node_output_modifiers_model = {} + dialog_node_output_modifiers_model['overwrite'] = True + + # Construct a dict representation of a DialogNodeOutput model + dialog_node_output_model = {} + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeContext model + dialog_node_context_model = {} + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeNextStep model + dialog_node_next_step_model = {} + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + # Construct a dict 
representation of a DialogNodeAction model + dialog_node_action_model = {} + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + # Set up parameter values + workspace_id = 'testString' + dialog_node = 'testString' + description = 'testString' + conditions = 'testString' + parent = 'testString' + previous_sibling = 'testString' + output = dialog_node_output_model + context = dialog_node_context_model + metadata = {'anyKey': 'anyValue'} + next_step = dialog_node_next_step_model + title = 'testString' + type = 'standard' + event_name = 'focus' + variable = 'testString' + actions = [dialog_node_action_model] + digress_in = 'not_available' + digress_out = 'allow_returning' + digress_out_slots = 'not_allowed' + user_label = 'testString' + disambiguation_opt_out = False + include_audit = False + + # Invoke method + response = _service.create_dialog_node( + workspace_id, + dialog_node, + description=description, + conditions=conditions, + parent=parent, + previous_sibling=previous_sibling, + output=output, + context=context, + metadata=metadata, + next_step=next_step, + title=title, + type=type, + event_name=event_name, + variable=variable, + actions=actions, + digress_in=digress_in, + digress_out=digress_out, + digress_out_slots=digress_out_slots, + user_label=user_label, + disambiguation_opt_out=disambiguation_opt_out, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['dialog_node'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['conditions'] == 'testString' + assert req_body['parent'] == 'testString' + assert req_body['previous_sibling'] == 'testString' + assert req_body['output'] == dialog_node_output_model + assert req_body['context'] == dialog_node_context_model + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['next_step'] == dialog_node_next_step_model + assert req_body['title'] == 'testString' + assert req_body['type'] == 'standard' + assert req_body['event_name'] == 'focus' + assert req_body['variable'] == 'testString' + assert req_body['actions'] == [dialog_node_action_model] + assert req_body['digress_in'] == 'not_available' + assert req_body['digress_out'] == 'allow_returning' + assert req_body['digress_out_slots'] == 'not_allowed' + assert req_body['user_label'] == 'testString' + assert req_body['disambiguation_opt_out'] == False + + def test_create_dialog_node_all_params_with_retries(self): + # Enable retries and run test_create_dialog_node_all_params. + _service.enable_retries() + self.test_create_dialog_node_all_params() + + # Disable retries and run test_create_dialog_node_all_params. 
+ _service.disable_retries() + self.test_create_dialog_node_all_params() + + @responses.activate + def test_create_dialog_node_required_params(self): + """ + test_create_dialog_node_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes') + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a DialogNodeOutputTextValuesElement model + dialog_node_output_text_values_element_model = {} + dialog_node_output_text_values_element_model['text'] = 'testString' + + # Construct a dict representation of a ResponseGenericChannel model + response_generic_channel_model = {} + response_generic_channel_model['channel'] = 'chat' + + # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model + dialog_node_output_generic_model = {} + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + # Construct a dict representation of a DialogNodeOutputModifiers model + dialog_node_output_modifiers_model = {} + dialog_node_output_modifiers_model['overwrite'] = True + + # Construct a dict representation of a DialogNodeOutput model + dialog_node_output_model = {} + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeContext model + dialog_node_context_model = {} + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeNextStep model + dialog_node_next_step_model = {} + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + # Construct a dict representation of a DialogNodeAction model + 
dialog_node_action_model = {} + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + # Set up parameter values + workspace_id = 'testString' + dialog_node = 'testString' + description = 'testString' + conditions = 'testString' + parent = 'testString' + previous_sibling = 'testString' + output = dialog_node_output_model + context = dialog_node_context_model + metadata = {'anyKey': 'anyValue'} + next_step = dialog_node_next_step_model + title = 'testString' + type = 'standard' + event_name = 'focus' + variable = 'testString' + actions = [dialog_node_action_model] + digress_in = 'not_available' + digress_out = 'allow_returning' + digress_out_slots = 'not_allowed' + user_label = 'testString' + disambiguation_opt_out = False + + # Invoke method + response = _service.create_dialog_node( + workspace_id, + dialog_node, + description=description, + conditions=conditions, + parent=parent, + previous_sibling=previous_sibling, + output=output, + context=context, + metadata=metadata, + next_step=next_step, + title=title, + type=type, + event_name=event_name, + variable=variable, + actions=actions, + digress_in=digress_in, + digress_out=digress_out, + digress_out_slots=digress_out_slots, + user_label=user_label, + disambiguation_opt_out=disambiguation_opt_out, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['dialog_node'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['conditions'] == 'testString' + assert req_body['parent'] == 'testString' + assert req_body['previous_sibling'] == 'testString' + assert req_body['output'] == dialog_node_output_model + assert req_body['context'] == dialog_node_context_model + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['next_step'] == dialog_node_next_step_model + assert req_body['title'] == 'testString' + assert req_body['type'] == 'standard' + assert req_body['event_name'] == 'focus' + assert req_body['variable'] == 'testString' + assert req_body['actions'] == [dialog_node_action_model] + assert req_body['digress_in'] == 'not_available' + assert req_body['digress_out'] == 'allow_returning' + assert req_body['digress_out_slots'] == 'not_allowed' + assert req_body['user_label'] == 'testString' + assert req_body['disambiguation_opt_out'] == False + + def test_create_dialog_node_required_params_with_retries(self): + # Enable retries and run test_create_dialog_node_required_params. + _service.enable_retries() + self.test_create_dialog_node_required_params() + + # Disable retries and run test_create_dialog_node_required_params. 
+ _service.disable_retries() + self.test_create_dialog_node_required_params() + + @responses.activate + def test_create_dialog_node_value_error(self): + """ + test_create_dialog_node_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes') + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a DialogNodeOutputTextValuesElement model + dialog_node_output_text_values_element_model = {} + dialog_node_output_text_values_element_model['text'] = 'testString' + + # Construct a dict representation of a ResponseGenericChannel model + response_generic_channel_model = {} + response_generic_channel_model['channel'] = 'chat' + + # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model + dialog_node_output_generic_model = {} + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + # Construct a dict representation of a DialogNodeOutputModifiers model + dialog_node_output_modifiers_model = {} + dialog_node_output_modifiers_model['overwrite'] = True + + # Construct a dict representation of a DialogNodeOutput model + dialog_node_output_model = {} + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeContext model + dialog_node_context_model = {} + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeNextStep model + dialog_node_next_step_model = {} + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + # Construct a dict representation of a DialogNodeAction model + dialog_node_action_model 
= {} + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + # Set up parameter values + workspace_id = 'testString' + dialog_node = 'testString' + description = 'testString' + conditions = 'testString' + parent = 'testString' + previous_sibling = 'testString' + output = dialog_node_output_model + context = dialog_node_context_model + metadata = {'anyKey': 'anyValue'} + next_step = dialog_node_next_step_model + title = 'testString' + type = 'standard' + event_name = 'focus' + variable = 'testString' + actions = [dialog_node_action_model] + digress_in = 'not_available' + digress_out = 'allow_returning' + digress_out_slots = 'not_allowed' + user_label = 'testString' + disambiguation_opt_out = False + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "dialog_node": dialog_node, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_dialog_node(**req_copy) + + def test_create_dialog_node_value_error_with_retries(self): + # Enable retries and run test_create_dialog_node_value_error. + _service.enable_retries() + self.test_create_dialog_node_value_error() + + # Disable retries and run test_create_dialog_node_value_error. + _service.disable_retries() + self.test_create_dialog_node_value_error() + + +class TestGetDialogNode: + """ + Test Class for get_dialog_node + """ + + @responses.activate + def test_get_dialog_node_all_params(self): + """ + get_dialog_node() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString') + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + dialog_node = 'testString' + include_audit = False + + # Invoke method + response = _service.get_dialog_node( + workspace_id, + dialog_node, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = 
responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_get_dialog_node_all_params_with_retries(self): + # Enable retries and run test_get_dialog_node_all_params. + _service.enable_retries() + self.test_get_dialog_node_all_params() + + # Disable retries and run test_get_dialog_node_all_params. + _service.disable_retries() + self.test_get_dialog_node_all_params() + + @responses.activate + def test_get_dialog_node_required_params(self): + """ + test_get_dialog_node_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString') + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + dialog_node = 'testString' + + # Invoke method + response = _service.get_dialog_node( + workspace_id, + dialog_node, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_dialog_node_required_params_with_retries(self): + # Enable retries and run test_get_dialog_node_required_params. + _service.enable_retries() + self.test_get_dialog_node_required_params() + + # Disable retries and run test_get_dialog_node_required_params. 
+ _service.disable_retries() + self.test_get_dialog_node_required_params() + + @responses.activate + def test_get_dialog_node_value_error(self): + """ + test_get_dialog_node_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString') + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + dialog_node = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "dialog_node": dialog_node, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_dialog_node(**req_copy) + + def test_get_dialog_node_value_error_with_retries(self): + # Enable retries and run test_get_dialog_node_value_error. + _service.enable_retries() + self.test_get_dialog_node_value_error() + + # Disable retries and run test_get_dialog_node_value_error. 
+ _service.disable_retries() + self.test_get_dialog_node_value_error() + + +class TestUpdateDialogNode: + """ + Test Class for update_dialog_node + """ + + @responses.activate + def test_update_dialog_node_all_params(self): + """ + update_dialog_node() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString') + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a DialogNodeOutputTextValuesElement model + dialog_node_output_text_values_element_model = {} + dialog_node_output_text_values_element_model['text'] = 'testString' + + # Construct a dict representation of a ResponseGenericChannel model + response_generic_channel_model = {} + response_generic_channel_model['channel'] = 'chat' + + # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model + dialog_node_output_generic_model = {} + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + # Construct a dict representation of a DialogNodeOutputModifiers model + dialog_node_output_modifiers_model = {} + dialog_node_output_modifiers_model['overwrite'] = True + + # Construct a dict representation of a DialogNodeOutput model + dialog_node_output_model = {} + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeContext model + dialog_node_context_model = {} + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeNextStep model + dialog_node_next_step_model = {} + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + # Construct a dict 
representation of a DialogNodeAction model + dialog_node_action_model = {} + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + # Set up parameter values + workspace_id = 'testString' + dialog_node = 'testString' + new_dialog_node = 'testString' + new_description = 'testString' + new_conditions = 'testString' + new_parent = 'testString' + new_previous_sibling = 'testString' + new_output = dialog_node_output_model + new_context = dialog_node_context_model + new_metadata = {'anyKey': 'anyValue'} + new_next_step = dialog_node_next_step_model + new_title = 'testString' + new_type = 'standard' + new_event_name = 'focus' + new_variable = 'testString' + new_actions = [dialog_node_action_model] + new_digress_in = 'not_available' + new_digress_out = 'allow_returning' + new_digress_out_slots = 'not_allowed' + new_user_label = 'testString' + new_disambiguation_opt_out = False + include_audit = False + + # Invoke method + response = _service.update_dialog_node( + workspace_id, + dialog_node, + new_dialog_node=new_dialog_node, + new_description=new_description, + new_conditions=new_conditions, + new_parent=new_parent, + new_previous_sibling=new_previous_sibling, + new_output=new_output, + new_context=new_context, + new_metadata=new_metadata, + new_next_step=new_next_step, + new_title=new_title, + new_type=new_type, + new_event_name=new_event_name, + new_variable=new_variable, + new_actions=new_actions, + new_digress_in=new_digress_in, + new_digress_out=new_digress_out, + new_digress_out_slots=new_digress_out_slots, + new_user_label=new_user_label, + new_disambiguation_opt_out=new_disambiguation_opt_out, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['dialog_node'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['conditions'] == 'testString' + assert req_body['parent'] == 'testString' + assert req_body['previous_sibling'] == 'testString' + assert req_body['output'] == dialog_node_output_model + assert req_body['context'] == dialog_node_context_model + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['next_step'] == dialog_node_next_step_model + assert req_body['title'] == 'testString' + assert req_body['type'] == 'standard' + assert req_body['event_name'] == 'focus' + assert req_body['variable'] == 'testString' + assert req_body['actions'] == [dialog_node_action_model] + assert req_body['digress_in'] == 'not_available' + assert req_body['digress_out'] == 'allow_returning' + assert req_body['digress_out_slots'] == 'not_allowed' + assert req_body['user_label'] == 'testString' + assert req_body['disambiguation_opt_out'] == False + + def test_update_dialog_node_all_params_with_retries(self): + # Enable retries and run test_update_dialog_node_all_params. 
+ _service.enable_retries() + self.test_update_dialog_node_all_params() + + # Disable retries and run test_update_dialog_node_all_params. + _service.disable_retries() + self.test_update_dialog_node_all_params() + + @responses.activate + def test_update_dialog_node_required_params(self): + """ + test_update_dialog_node_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString') + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a DialogNodeOutputTextValuesElement model + dialog_node_output_text_values_element_model = {} + dialog_node_output_text_values_element_model['text'] = 'testString' + + # Construct a dict representation of a ResponseGenericChannel model + response_generic_channel_model = {} + response_generic_channel_model['channel'] = 'chat' + + # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model + dialog_node_output_generic_model = {} + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + # Construct a dict representation of a DialogNodeOutputModifiers model + dialog_node_output_modifiers_model = {} + dialog_node_output_modifiers_model['overwrite'] = True + + # Construct a dict representation of a DialogNodeOutput model + dialog_node_output_model = {} + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeContext model + dialog_node_context_model = {} + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeNextStep model + dialog_node_next_step_model = {} + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 
'testString' + dialog_node_next_step_model['selector'] = 'condition' + + # Construct a dict representation of a DialogNodeAction model + dialog_node_action_model = {} + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + # Set up parameter values + workspace_id = 'testString' + dialog_node = 'testString' + new_dialog_node = 'testString' + new_description = 'testString' + new_conditions = 'testString' + new_parent = 'testString' + new_previous_sibling = 'testString' + new_output = dialog_node_output_model + new_context = dialog_node_context_model + new_metadata = {'anyKey': 'anyValue'} + new_next_step = dialog_node_next_step_model + new_title = 'testString' + new_type = 'standard' + new_event_name = 'focus' + new_variable = 'testString' + new_actions = [dialog_node_action_model] + new_digress_in = 'not_available' + new_digress_out = 'allow_returning' + new_digress_out_slots = 'not_allowed' + new_user_label = 'testString' + new_disambiguation_opt_out = False + + # Invoke method + response = _service.update_dialog_node( + workspace_id, + dialog_node, + new_dialog_node=new_dialog_node, + new_description=new_description, + new_conditions=new_conditions, + new_parent=new_parent, + new_previous_sibling=new_previous_sibling, + new_output=new_output, + new_context=new_context, + new_metadata=new_metadata, + new_next_step=new_next_step, + new_title=new_title, + new_type=new_type, + new_event_name=new_event_name, + new_variable=new_variable, + new_actions=new_actions, + new_digress_in=new_digress_in, + new_digress_out=new_digress_out, + new_digress_out_slots=new_digress_out_slots, + new_user_label=new_user_label, + new_disambiguation_opt_out=new_disambiguation_opt_out, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['dialog_node'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['conditions'] == 'testString' + assert req_body['parent'] == 'testString' + assert req_body['previous_sibling'] == 'testString' + assert req_body['output'] == dialog_node_output_model + assert req_body['context'] == dialog_node_context_model + assert req_body['metadata'] == {'anyKey': 'anyValue'} + assert req_body['next_step'] == dialog_node_next_step_model + assert req_body['title'] == 'testString' + assert req_body['type'] == 'standard' + assert req_body['event_name'] == 'focus' + assert req_body['variable'] == 'testString' + assert req_body['actions'] == [dialog_node_action_model] + assert req_body['digress_in'] == 'not_available' + assert req_body['digress_out'] == 'allow_returning' + assert req_body['digress_out_slots'] == 'not_allowed' + assert req_body['user_label'] == 'testString' + assert req_body['disambiguation_opt_out'] == False + + def test_update_dialog_node_required_params_with_retries(self): + # Enable retries and run test_update_dialog_node_required_params. + _service.enable_retries() + self.test_update_dialog_node_required_params() + + # Disable retries and run test_update_dialog_node_required_params. 
+ _service.disable_retries() + self.test_update_dialog_node_required_params() + + @responses.activate + def test_update_dialog_node_value_error(self): + """ + test_update_dialog_node_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString') + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a DialogNodeOutputTextValuesElement model + dialog_node_output_text_values_element_model = {} + dialog_node_output_text_values_element_model['text'] = 'testString' + + # Construct a dict representation of a ResponseGenericChannel model + response_generic_channel_model = {} + response_generic_channel_model['channel'] = 'chat' + + # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model + dialog_node_output_generic_model = {} + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + # Construct a dict representation of a DialogNodeOutputModifiers model + dialog_node_output_modifiers_model = {} + dialog_node_output_modifiers_model['overwrite'] = True + + # Construct a dict representation of a DialogNodeOutput model + dialog_node_output_model = {} + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeContext model + dialog_node_context_model = {} + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + # Construct a dict representation of a DialogNodeNextStep model + dialog_node_next_step_model = {} + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + # Construct a dict representation of a DialogNodeAction model + 
dialog_node_action_model = {} + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + # Set up parameter values + workspace_id = 'testString' + dialog_node = 'testString' + new_dialog_node = 'testString' + new_description = 'testString' + new_conditions = 'testString' + new_parent = 'testString' + new_previous_sibling = 'testString' + new_output = dialog_node_output_model + new_context = dialog_node_context_model + new_metadata = {'anyKey': 'anyValue'} + new_next_step = dialog_node_next_step_model + new_title = 'testString' + new_type = 'standard' + new_event_name = 'focus' + new_variable = 'testString' + new_actions = [dialog_node_action_model] + new_digress_in = 'not_available' + new_digress_out = 'allow_returning' + new_digress_out_slots = 'not_allowed' + new_user_label = 'testString' + new_disambiguation_opt_out = False + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "dialog_node": dialog_node, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_dialog_node(**req_copy) + + def test_update_dialog_node_value_error_with_retries(self): + # Enable retries and run test_update_dialog_node_value_error. + _service.enable_retries() + self.test_update_dialog_node_value_error() + + # Disable retries and run test_update_dialog_node_value_error. + _service.disable_retries() + self.test_update_dialog_node_value_error() + + +class TestDeleteDialogNode: + """ + Test Class for delete_dialog_node + """ + + @responses.activate + def test_delete_dialog_node_all_params(self): + """ + delete_dialog_node() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + dialog_node = 'testString' + + # Invoke method + response = _service.delete_dialog_node( + workspace_id, + dialog_node, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_dialog_node_all_params_with_retries(self): + # Enable retries and run test_delete_dialog_node_all_params. + _service.enable_retries() + self.test_delete_dialog_node_all_params() + + # Disable retries and run test_delete_dialog_node_all_params. 
+ _service.disable_retries() + self.test_delete_dialog_node_all_params() + + @responses.activate + def test_delete_dialog_node_value_error(self): + """ + test_delete_dialog_node_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + dialog_node = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + "dialog_node": dialog_node, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_dialog_node(**req_copy) + + def test_delete_dialog_node_value_error_with_retries(self): + # Enable retries and run test_delete_dialog_node_value_error. + _service.enable_retries() + self.test_delete_dialog_node_value_error() + + # Disable retries and run test_delete_dialog_node_value_error. + _service.disable_retries() + self.test_delete_dialog_node_value_error() + + +# endregion +############################################################################## +# End of Service: DialogNodes +############################################################################## + +############################################################################## +# Start of Service: Logs +############################################################################## +# region + + +class TestListLogs: + """ + Test Class for list_logs + """ + + @responses.activate + def test_list_logs_all_params(self): + """ + list_logs() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/logs') + mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": 
{"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + sort = 'testString' + filter = 'testString' + page_limit = 100 + cursor = 'testString' + + # Invoke method + response = _service.list_logs( + workspace_id, + sort=sort, + filter=filter, + page_limit=page_limit, + cursor=cursor, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'sort={}'.format(sort) in query_string + assert 'filter={}'.format(filter) in query_string + assert 'page_limit={}'.format(page_limit) in query_string + assert 'cursor={}'.format(cursor) in query_string + + def test_list_logs_all_params_with_retries(self): + # Enable retries and run test_list_logs_all_params. + _service.enable_retries() + self.test_list_logs_all_params() + + # Disable retries and run test_list_logs_all_params. 
+ _service.disable_retries() + self.test_list_logs_all_params() + + @responses.activate + def test_list_logs_required_params(self): + """ + test_list_logs_required_params() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/logs') + mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": 
"title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Invoke method + response = _service.list_logs( + workspace_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_logs_required_params_with_retries(self): + # Enable retries and run test_list_logs_required_params. + _service.enable_retries() + self.test_list_logs_required_params() + + # Disable retries and run test_list_logs_required_params. + _service.disable_retries() + self.test_list_logs_required_params() + + @responses.activate + def test_list_logs_value_error(self): + """ + test_list_logs_value_error() + """ + # Set up mock + url = preprocess_url('/v1/workspaces/testString/logs') + mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, 
"suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + workspace_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "workspace_id": workspace_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_logs(**req_copy) + + def test_list_logs_value_error_with_retries(self): + # Enable retries and run test_list_logs_value_error. + _service.enable_retries() + self.test_list_logs_value_error() + + # Disable retries and run test_list_logs_value_error. 
+ _service.disable_retries() + self.test_list_logs_value_error() + + +class TestListAllLogs: + """ + Test Class for list_all_logs + """ + + @responses.activate + def test_list_all_logs_all_params(self): + """ + list_all_logs() + """ + # Set up mock + url = preprocess_url('/v1/logs') + mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": 
[{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + filter = 'testString' + sort = 'testString' + page_limit = 100 + cursor = 'testString' + + # Invoke method + response = _service.list_all_logs( + filter, + sort=sort, + page_limit=page_limit, + cursor=cursor, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'filter={}'.format(filter) in query_string + assert 'sort={}'.format(sort) in query_string + assert 'page_limit={}'.format(page_limit) in query_string + assert 'cursor={}'.format(cursor) in query_string + + def test_list_all_logs_all_params_with_retries(self): + # Enable retries and run test_list_all_logs_all_params. + _service.enable_retries() + self.test_list_all_logs_all_params() + + # Disable retries and run test_list_all_logs_all_params. 
+ _service.disable_retries() + self.test_list_all_logs_all_params() + + @responses.activate + def test_list_all_logs_required_params(self): + """ + test_list_all_logs_required_params() + """ + # Set up mock + url = preprocess_url('/v1/logs') + mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", 
"conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + filter = 'testString' + + # Invoke method + response = _service.list_all_logs( + filter, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'filter={}'.format(filter) in query_string + + def test_list_all_logs_required_params_with_retries(self): + # Enable retries and run test_list_all_logs_required_params. + _service.enable_retries() + self.test_list_all_logs_required_params() + + # Disable retries and run test_list_all_logs_required_params. + _service.disable_retries() + self.test_list_all_logs_required_params() + + @responses.activate + def test_list_all_logs_value_error(self): + """ + test_list_all_logs_value_error() + """ + # Set up mock + url = preprocess_url('/v1/logs') + mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": 
"result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + filter = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "filter": filter, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_all_logs(**req_copy) + + def test_list_all_logs_value_error_with_retries(self): + # Enable retries and run test_list_all_logs_value_error. + _service.enable_retries() + self.test_list_all_logs_value_error() + + # Disable retries and run test_list_all_logs_value_error. 
+ _service.disable_retries() + self.test_list_all_logs_value_error() + + +# endregion +############################################################################## +# End of Service: Logs +############################################################################## + +############################################################################## +# Start of Service: UserData +############################################################################## +# region + + +class TestDeleteUserData: + """ + Test Class for delete_user_data + """ + + @responses.activate + def test_delete_user_data_all_params(self): + """ + delete_user_data() + """ + # Set up mock + url = preprocess_url('/v1/user_data') + responses.add( + responses.DELETE, + url, + status=202, + ) + + # Set up parameter values + customer_id = 'testString' + + # Invoke method + response = _service.delete_user_data( + customer_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'customer_id={}'.format(customer_id) in query_string + + def test_delete_user_data_all_params_with_retries(self): + # Enable retries and run test_delete_user_data_all_params. + _service.enable_retries() + self.test_delete_user_data_all_params() + + # Disable retries and run test_delete_user_data_all_params. + _service.disable_retries() + self.test_delete_user_data_all_params() + + @responses.activate + def test_delete_user_data_value_error(self): + """ + test_delete_user_data_value_error() + """ + # Set up mock + url = preprocess_url('/v1/user_data') + responses.add( + responses.DELETE, + url, + status=202, + ) + + # Set up parameter values + customer_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customer_id": customer_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_user_data(**req_copy) + + def test_delete_user_data_value_error_with_retries(self): + # Enable retries and run test_delete_user_data_value_error. + _service.enable_retries() + self.test_delete_user_data_value_error() + + # Disable retries and run test_delete_user_data_value_error. 
+ _service.disable_retries() + self.test_delete_user_data_value_error() + + +# endregion +############################################################################## +# End of Service: UserData +############################################################################## + + +############################################################################## +# Start of Model Tests +############################################################################## +# region + + +class TestModel_AgentAvailabilityMessage: + """ + Test Class for AgentAvailabilityMessage + """ + + def test_agent_availability_message_serialization(self): + """ + Test serialization/deserialization for AgentAvailabilityMessage + """ + + # Construct a json representation of a AgentAvailabilityMessage model + agent_availability_message_model_json = {} + agent_availability_message_model_json['message'] = 'testString' + + # Construct a model instance of AgentAvailabilityMessage by calling from_dict on the json representation + agent_availability_message_model = AgentAvailabilityMessage.from_dict(agent_availability_message_model_json) + assert agent_availability_message_model != False + + # Construct a model instance of AgentAvailabilityMessage by calling from_dict on the json representation + agent_availability_message_model_dict = AgentAvailabilityMessage.from_dict(agent_availability_message_model_json).__dict__ + agent_availability_message_model2 = AgentAvailabilityMessage(**agent_availability_message_model_dict) + + # Verify the model instances are equivalent + assert agent_availability_message_model == agent_availability_message_model2 + + # Convert model instance back to dict and verify no loss of data + agent_availability_message_model_json2 = agent_availability_message_model.to_dict() + assert agent_availability_message_model_json2 == agent_availability_message_model_json + + +class TestModel_BulkClassifyOutput: + """ + Test Class for BulkClassifyOutput + """ + + def test_bulk_classify_output_serialization(self): + """ + Test serialization/deserialization for BulkClassifyOutput + """ + + # Construct dict forms of any model objects needed in order to build this model. 
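+ # Each nested model below is built as a plain dict; from_dict is expected to hydrate
+ # these dicts into their corresponding model classes when the JSON representation is parsed.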
+ + bulk_classify_utterance_model = {} # BulkClassifyUtterance + bulk_classify_utterance_model['text'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + # Construct a json representation of a BulkClassifyOutput model + bulk_classify_output_model_json = {} + bulk_classify_output_model_json['input'] = bulk_classify_utterance_model + bulk_classify_output_model_json['entities'] = [runtime_entity_model] + bulk_classify_output_model_json['intents'] = [runtime_intent_model] + + # Construct a model instance of BulkClassifyOutput by calling from_dict on the json representation + bulk_classify_output_model = BulkClassifyOutput.from_dict(bulk_classify_output_model_json) + assert bulk_classify_output_model != False + + # Construct a model instance of BulkClassifyOutput by 
calling from_dict on the json representation + bulk_classify_output_model_dict = BulkClassifyOutput.from_dict(bulk_classify_output_model_json).__dict__ + bulk_classify_output_model2 = BulkClassifyOutput(**bulk_classify_output_model_dict) + + # Verify the model instances are equivalent + assert bulk_classify_output_model == bulk_classify_output_model2 + + # Convert model instance back to dict and verify no loss of data + bulk_classify_output_model_json2 = bulk_classify_output_model.to_dict() + assert bulk_classify_output_model_json2 == bulk_classify_output_model_json + + +class TestModel_BulkClassifyResponse: + """ + Test Class for BulkClassifyResponse + """ + + def test_bulk_classify_response_serialization(self): + """ + Test serialization/deserialization for BulkClassifyResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. + + bulk_classify_utterance_model = {} # BulkClassifyUtterance + bulk_classify_utterance_model['text'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + 
runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + bulk_classify_output_model = {} # BulkClassifyOutput + bulk_classify_output_model['input'] = bulk_classify_utterance_model + bulk_classify_output_model['entities'] = [runtime_entity_model] + bulk_classify_output_model['intents'] = [runtime_intent_model] + + # Construct a json representation of a BulkClassifyResponse model + bulk_classify_response_model_json = {} + bulk_classify_response_model_json['output'] = [bulk_classify_output_model] + + # Construct a model instance of BulkClassifyResponse by calling from_dict on the json representation + bulk_classify_response_model = BulkClassifyResponse.from_dict(bulk_classify_response_model_json) + assert bulk_classify_response_model != False + + # Construct a model instance of BulkClassifyResponse by calling from_dict on the json representation + bulk_classify_response_model_dict = BulkClassifyResponse.from_dict(bulk_classify_response_model_json).__dict__ + bulk_classify_response_model2 = BulkClassifyResponse(**bulk_classify_response_model_dict) + + # Verify the model instances are equivalent + assert bulk_classify_response_model == bulk_classify_response_model2 + + # Convert model instance back to dict and verify no loss of data + bulk_classify_response_model_json2 = bulk_classify_response_model.to_dict() + assert bulk_classify_response_model_json2 == bulk_classify_response_model_json + + +class TestModel_BulkClassifyUtterance: + """ + Test Class for BulkClassifyUtterance + """ + + def test_bulk_classify_utterance_serialization(self): + """ + Test serialization/deserialization for BulkClassifyUtterance + """ + + # Construct a json representation of a BulkClassifyUtterance model + bulk_classify_utterance_model_json = {} + bulk_classify_utterance_model_json['text'] = 'testString' + + # Construct a model instance of BulkClassifyUtterance by calling from_dict on the json representation + bulk_classify_utterance_model = BulkClassifyUtterance.from_dict(bulk_classify_utterance_model_json) + assert bulk_classify_utterance_model != False + + # Construct a model instance of BulkClassifyUtterance by calling from_dict on the json representation + bulk_classify_utterance_model_dict = BulkClassifyUtterance.from_dict(bulk_classify_utterance_model_json).__dict__ + bulk_classify_utterance_model2 = BulkClassifyUtterance(**bulk_classify_utterance_model_dict) + + # Verify the model instances are equivalent + assert bulk_classify_utterance_model == bulk_classify_utterance_model2 + + # Convert model instance back to dict and verify no loss of data + bulk_classify_utterance_model_json2 = bulk_classify_utterance_model.to_dict() + assert bulk_classify_utterance_model_json2 == bulk_classify_utterance_model_json + + +class TestModel_CaptureGroup: + """ + Test Class for CaptureGroup + """ + + def test_capture_group_serialization(self): + """ + Test serialization/deserialization for CaptureGroup + """ + + # Construct a json representation of a CaptureGroup model + capture_group_model_json = {} + capture_group_model_json['group'] = 'testString' + capture_group_model_json['location'] = [38] + + # Construct a model instance of CaptureGroup by calling from_dict on the json representation + capture_group_model = CaptureGroup.from_dict(capture_group_model_json) + assert capture_group_model != False + + # 
Construct a model instance of CaptureGroup by calling from_dict on the json representation + capture_group_model_dict = CaptureGroup.from_dict(capture_group_model_json).__dict__ + capture_group_model2 = CaptureGroup(**capture_group_model_dict) + + # Verify the model instances are equivalent + assert capture_group_model == capture_group_model2 + + # Convert model instance back to dict and verify no loss of data + capture_group_model_json2 = capture_group_model.to_dict() + assert capture_group_model_json2 == capture_group_model_json + + +class TestModel_ChannelTransferInfo: + """ + Test Class for ChannelTransferInfo + """ + + def test_channel_transfer_info_serialization(self): + """ + Test serialization/deserialization for ChannelTransferInfo + """ + + # Construct dict forms of any model objects needed in order to build this model. + + channel_transfer_target_chat_model = {} # ChannelTransferTargetChat + channel_transfer_target_chat_model['url'] = 'testString' + + channel_transfer_target_model = {} # ChannelTransferTarget + channel_transfer_target_model['chat'] = channel_transfer_target_chat_model + + # Construct a json representation of a ChannelTransferInfo model + channel_transfer_info_model_json = {} + channel_transfer_info_model_json['target'] = channel_transfer_target_model + + # Construct a model instance of ChannelTransferInfo by calling from_dict on the json representation + channel_transfer_info_model = ChannelTransferInfo.from_dict(channel_transfer_info_model_json) + assert channel_transfer_info_model != False + + # Construct a model instance of ChannelTransferInfo by calling from_dict on the json representation + channel_transfer_info_model_dict = ChannelTransferInfo.from_dict(channel_transfer_info_model_json).__dict__ + channel_transfer_info_model2 = ChannelTransferInfo(**channel_transfer_info_model_dict) + + # Verify the model instances are equivalent + assert channel_transfer_info_model == channel_transfer_info_model2 + + # Convert model instance back to dict and verify no loss of data + channel_transfer_info_model_json2 = channel_transfer_info_model.to_dict() + assert channel_transfer_info_model_json2 == channel_transfer_info_model_json + + +class TestModel_ChannelTransferTarget: + """ + Test Class for ChannelTransferTarget + """ + + def test_channel_transfer_target_serialization(self): + """ + Test serialization/deserialization for ChannelTransferTarget + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + channel_transfer_target_chat_model = {} # ChannelTransferTargetChat + channel_transfer_target_chat_model['url'] = 'testString' + + # Construct a json representation of a ChannelTransferTarget model + channel_transfer_target_model_json = {} + channel_transfer_target_model_json['chat'] = channel_transfer_target_chat_model + + # Construct a model instance of ChannelTransferTarget by calling from_dict on the json representation + channel_transfer_target_model = ChannelTransferTarget.from_dict(channel_transfer_target_model_json) + assert channel_transfer_target_model != False + + # Construct a model instance of ChannelTransferTarget by calling from_dict on the json representation + channel_transfer_target_model_dict = ChannelTransferTarget.from_dict(channel_transfer_target_model_json).__dict__ + channel_transfer_target_model2 = ChannelTransferTarget(**channel_transfer_target_model_dict) + + # Verify the model instances are equivalent + assert channel_transfer_target_model == channel_transfer_target_model2 + + # Convert model instance back to dict and verify no loss of data + channel_transfer_target_model_json2 = channel_transfer_target_model.to_dict() + assert channel_transfer_target_model_json2 == channel_transfer_target_model_json + + +class TestModel_ChannelTransferTargetChat: + """ + Test Class for ChannelTransferTargetChat + """ + + def test_channel_transfer_target_chat_serialization(self): + """ + Test serialization/deserialization for ChannelTransferTargetChat + """ + + # Construct a json representation of a ChannelTransferTargetChat model + channel_transfer_target_chat_model_json = {} + channel_transfer_target_chat_model_json['url'] = 'testString' + + # Construct a model instance of ChannelTransferTargetChat by calling from_dict on the json representation + channel_transfer_target_chat_model = ChannelTransferTargetChat.from_dict(channel_transfer_target_chat_model_json) + assert channel_transfer_target_chat_model != False + + # Construct a model instance of ChannelTransferTargetChat by calling from_dict on the json representation + channel_transfer_target_chat_model_dict = ChannelTransferTargetChat.from_dict(channel_transfer_target_chat_model_json).__dict__ + channel_transfer_target_chat_model2 = ChannelTransferTargetChat(**channel_transfer_target_chat_model_dict) + + # Verify the model instances are equivalent + assert channel_transfer_target_chat_model == channel_transfer_target_chat_model2 + + # Convert model instance back to dict and verify no loss of data + channel_transfer_target_chat_model_json2 = channel_transfer_target_chat_model.to_dict() + assert channel_transfer_target_chat_model_json2 == channel_transfer_target_chat_model_json + + +class TestModel_Context: + """ + Test Class for Context + """ + + def test_context_serialization(self): + """ + Test serialization/deserialization for Context + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_context_metadata_model = {} # MessageContextMetadata + message_context_metadata_model['deployment'] = 'testString' + message_context_metadata_model['user_id'] = 'testString' + + # Construct a json representation of a Context model + context_model_json = {} + context_model_json['conversation_id'] = 'testString' + context_model_json['system'] = {'anyKey': 'anyValue'} + context_model_json['metadata'] = message_context_metadata_model + context_model_json['foo'] = 'testString' + + # Construct a model instance of Context by calling from_dict on the json representation + context_model = Context.from_dict(context_model_json) + assert context_model != False + + # Construct a model instance of Context by calling from_dict on the json representation + context_model_dict = Context.from_dict(context_model_json).__dict__ + context_model2 = Context(**context_model_dict) + + # Verify the model instances are equivalent + assert context_model == context_model2 + + # Convert model instance back to dict and verify no loss of data + context_model_json2 = context_model.to_dict() + assert context_model_json2 == context_model_json + + # Test get_properties and set_properties methods. + context_model.set_properties({}) + actual_dict = context_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + context_model.set_properties(expected_dict) + actual_dict = context_model.get_properties() + assert actual_dict.keys() == expected_dict.keys() + + +class TestModel_Counterexample: + """ + Test Class for Counterexample + """ + + def test_counterexample_serialization(self): + """ + Test serialization/deserialization for Counterexample + """ + + # Construct a json representation of a Counterexample model + counterexample_model_json = {} + counterexample_model_json['text'] = 'testString' + + # Construct a model instance of Counterexample by calling from_dict on the json representation + counterexample_model = Counterexample.from_dict(counterexample_model_json) + assert counterexample_model != False + + # Construct a model instance of Counterexample by calling from_dict on the json representation + counterexample_model_dict = Counterexample.from_dict(counterexample_model_json).__dict__ + counterexample_model2 = Counterexample(**counterexample_model_dict) + + # Verify the model instances are equivalent + assert counterexample_model == counterexample_model2 + + # Convert model instance back to dict and verify no loss of data + counterexample_model_json2 = counterexample_model.to_dict() + assert counterexample_model_json2 == counterexample_model_json + + +class TestModel_CounterexampleCollection: + """ + Test Class for CounterexampleCollection + """ + + def test_counterexample_collection_serialization(self): + """ + Test serialization/deserialization for CounterexampleCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
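+ # A collection response pairs the counterexample entries with a pagination object
+ # (refresh/next URLs and cursors plus total/matched counts), so both dicts are built here.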
+ + counterexample_model = {} # Counterexample + counterexample_model['text'] = 'testString' + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a CounterexampleCollection model + counterexample_collection_model_json = {} + counterexample_collection_model_json['counterexamples'] = [counterexample_model] + counterexample_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of CounterexampleCollection by calling from_dict on the json representation + counterexample_collection_model = CounterexampleCollection.from_dict(counterexample_collection_model_json) + assert counterexample_collection_model != False + + # Construct a model instance of CounterexampleCollection by calling from_dict on the json representation + counterexample_collection_model_dict = CounterexampleCollection.from_dict(counterexample_collection_model_json).__dict__ + counterexample_collection_model2 = CounterexampleCollection(**counterexample_collection_model_dict) + + # Verify the model instances are equivalent + assert counterexample_collection_model == counterexample_collection_model2 + + # Convert model instance back to dict and verify no loss of data + counterexample_collection_model_json2 = counterexample_collection_model.to_dict() + assert counterexample_collection_model_json2 == counterexample_collection_model_json + + +class TestModel_CreateEntity: + """ + Test Class for CreateEntity + """ + + def test_create_entity_serialization(self): + """ + Test serialization/deserialization for CreateEntity + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + create_value_model = {} # CreateValue + create_value_model['value'] = 'testString' + create_value_model['metadata'] = {'anyKey': 'anyValue'} + create_value_model['type'] = 'synonyms' + create_value_model['synonyms'] = ['testString'] + create_value_model['patterns'] = ['testString'] + + # Construct a json representation of a CreateEntity model + create_entity_model_json = {} + create_entity_model_json['entity'] = 'testString' + create_entity_model_json['description'] = 'testString' + create_entity_model_json['metadata'] = {'anyKey': 'anyValue'} + create_entity_model_json['fuzzy_match'] = True + create_entity_model_json['values'] = [create_value_model] + + # Construct a model instance of CreateEntity by calling from_dict on the json representation + create_entity_model = CreateEntity.from_dict(create_entity_model_json) + assert create_entity_model != False + + # Construct a model instance of CreateEntity by calling from_dict on the json representation + create_entity_model_dict = CreateEntity.from_dict(create_entity_model_json).__dict__ + create_entity_model2 = CreateEntity(**create_entity_model_dict) + + # Verify the model instances are equivalent + assert create_entity_model == create_entity_model2 + + # Convert model instance back to dict and verify no loss of data + create_entity_model_json2 = create_entity_model.to_dict() + assert create_entity_model_json2 == create_entity_model_json + + +class TestModel_CreateIntent: + """ + Test Class for CreateIntent + """ + + def test_create_intent_serialization(self): + """ + Test serialization/deserialization for CreateIntent + """ + + # Construct dict forms of any model objects needed in order to build this model. + + mention_model = {} # Mention + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + example_model = {} # Example + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + # Construct a json representation of a CreateIntent model + create_intent_model_json = {} + create_intent_model_json['intent'] = 'testString' + create_intent_model_json['description'] = 'testString' + create_intent_model_json['examples'] = [example_model] + + # Construct a model instance of CreateIntent by calling from_dict on the json representation + create_intent_model = CreateIntent.from_dict(create_intent_model_json) + assert create_intent_model != False + + # Construct a model instance of CreateIntent by calling from_dict on the json representation + create_intent_model_dict = CreateIntent.from_dict(create_intent_model_json).__dict__ + create_intent_model2 = CreateIntent(**create_intent_model_dict) + + # Verify the model instances are equivalent + assert create_intent_model == create_intent_model2 + + # Convert model instance back to dict and verify no loss of data + create_intent_model_json2 = create_intent_model.to_dict() + assert create_intent_model_json2 == create_intent_model_json + + +class TestModel_CreateValue: + """ + Test Class for CreateValue + """ + + def test_create_value_serialization(self): + """ + Test serialization/deserialization for CreateValue + """ + + # Construct a json representation of a CreateValue model + create_value_model_json = {} + create_value_model_json['value'] = 'testString' + create_value_model_json['metadata'] = {'anyKey': 'anyValue'} + create_value_model_json['type'] = 'synonyms' + create_value_model_json['synonyms'] = ['testString'] + create_value_model_json['patterns'] = ['testString'] + + # Construct a model instance of CreateValue by calling from_dict on the 
json representation + create_value_model = CreateValue.from_dict(create_value_model_json) + assert create_value_model != False + + # Construct a model instance of CreateValue by calling from_dict on the json representation + create_value_model_dict = CreateValue.from_dict(create_value_model_json).__dict__ + create_value_model2 = CreateValue(**create_value_model_dict) + + # Verify the model instances are equivalent + assert create_value_model == create_value_model2 + + # Convert model instance back to dict and verify no loss of data + create_value_model_json2 = create_value_model.to_dict() + assert create_value_model_json2 == create_value_model_json + + +class TestModel_DialogNode: + """ + Test Class for DialogNode + """ + + def test_dialog_node_serialization(self): + """ + Test serialization/deserialization for DialogNode + """ + + # Construct dict forms of any model objects needed in order to build this model. + + dialog_node_output_text_values_element_model = {} # DialogNodeOutputTextValuesElement + dialog_node_output_text_values_element_model['text'] = 'testString' + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + dialog_node_output_generic_model = {} # DialogNodeOutputGenericDialogNodeOutputResponseTypeText + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + dialog_node_output_modifiers_model = {} # DialogNodeOutputModifiers + dialog_node_output_modifiers_model['overwrite'] = True + + dialog_node_output_model = {} # DialogNodeOutput + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + dialog_node_context_model = {} # DialogNodeContext + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + dialog_node_next_step_model = {} # DialogNodeNextStep + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + # Construct a json representation of a DialogNode model + dialog_node_model_json = {} + dialog_node_model_json['dialog_node'] = 'testString' + dialog_node_model_json['description'] = 'testString' + dialog_node_model_json['conditions'] = 'testString' + dialog_node_model_json['parent'] = 'testString' + dialog_node_model_json['previous_sibling'] = 'testString' + dialog_node_model_json['output'] = dialog_node_output_model + dialog_node_model_json['context'] = dialog_node_context_model + dialog_node_model_json['metadata'] = {'anyKey': 'anyValue'} + dialog_node_model_json['next_step'] = dialog_node_next_step_model + dialog_node_model_json['title'] = 'testString' + 
dialog_node_model_json['type'] = 'standard' + dialog_node_model_json['event_name'] = 'focus' + dialog_node_model_json['variable'] = 'testString' + dialog_node_model_json['actions'] = [dialog_node_action_model] + dialog_node_model_json['digress_in'] = 'not_available' + dialog_node_model_json['digress_out'] = 'allow_returning' + dialog_node_model_json['digress_out_slots'] = 'not_allowed' + dialog_node_model_json['user_label'] = 'testString' + dialog_node_model_json['disambiguation_opt_out'] = False + + # Construct a model instance of DialogNode by calling from_dict on the json representation + dialog_node_model = DialogNode.from_dict(dialog_node_model_json) + assert dialog_node_model != False + + # Construct a model instance of DialogNode by calling from_dict on the json representation + dialog_node_model_dict = DialogNode.from_dict(dialog_node_model_json).__dict__ + dialog_node_model2 = DialogNode(**dialog_node_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_model == dialog_node_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_model_json2 = dialog_node_model.to_dict() + assert dialog_node_model_json2 == dialog_node_model_json + + +class TestModel_DialogNodeAction: + """ + Test Class for DialogNodeAction + """ + + def test_dialog_node_action_serialization(self): + """ + Test serialization/deserialization for DialogNodeAction + """ + + # Construct a json representation of a DialogNodeAction model + dialog_node_action_model_json = {} + dialog_node_action_model_json['name'] = 'testString' + dialog_node_action_model_json['type'] = 'client' + dialog_node_action_model_json['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model_json['result_variable'] = 'testString' + dialog_node_action_model_json['credentials'] = 'testString' + + # Construct a model instance of DialogNodeAction by calling from_dict on the json representation + dialog_node_action_model = DialogNodeAction.from_dict(dialog_node_action_model_json) + assert dialog_node_action_model != False + + # Construct a model instance of DialogNodeAction by calling from_dict on the json representation + dialog_node_action_model_dict = DialogNodeAction.from_dict(dialog_node_action_model_json).__dict__ + dialog_node_action_model2 = DialogNodeAction(**dialog_node_action_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_action_model == dialog_node_action_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_action_model_json2 = dialog_node_action_model.to_dict() + assert dialog_node_action_model_json2 == dialog_node_action_model_json + + +class TestModel_DialogNodeCollection: + """ + Test Class for DialogNodeCollection + """ + + def test_dialog_node_collection_serialization(self): + """ + Test serialization/deserialization for DialogNodeCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + dialog_node_output_text_values_element_model = {} # DialogNodeOutputTextValuesElement + dialog_node_output_text_values_element_model['text'] = 'testString' + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + dialog_node_output_generic_model = {} # DialogNodeOutputGenericDialogNodeOutputResponseTypeText + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + dialog_node_output_modifiers_model = {} # DialogNodeOutputModifiers + dialog_node_output_modifiers_model['overwrite'] = True + + dialog_node_output_model = {} # DialogNodeOutput + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + dialog_node_context_model = {} # DialogNodeContext + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + dialog_node_next_step_model = {} # DialogNodeNextStep + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_model = {} # DialogNode + dialog_node_model['dialog_node'] = 'testString' + dialog_node_model['description'] = 'testString' + dialog_node_model['conditions'] = 'testString' + dialog_node_model['parent'] = 'testString' + dialog_node_model['previous_sibling'] = 'testString' + dialog_node_model['output'] = dialog_node_output_model + dialog_node_model['context'] = dialog_node_context_model + dialog_node_model['metadata'] = {'anyKey': 'anyValue'} + dialog_node_model['next_step'] = dialog_node_next_step_model + dialog_node_model['title'] = 'testString' + dialog_node_model['type'] = 'standard' + dialog_node_model['event_name'] = 'focus' + dialog_node_model['variable'] = 'testString' + dialog_node_model['actions'] = [dialog_node_action_model] + dialog_node_model['digress_in'] = 'not_available' + dialog_node_model['digress_out'] = 'allow_returning' + dialog_node_model['digress_out_slots'] = 'not_allowed' + dialog_node_model['user_label'] = 'testString' + dialog_node_model['disambiguation_opt_out'] = False + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a DialogNodeCollection model + dialog_node_collection_model_json = {} + dialog_node_collection_model_json['dialog_nodes'] = [dialog_node_model] + dialog_node_collection_model_json['pagination'] = pagination_model + + # Construct 
a model instance of DialogNodeCollection by calling from_dict on the json representation + dialog_node_collection_model = DialogNodeCollection.from_dict(dialog_node_collection_model_json) + assert dialog_node_collection_model != False + + # Construct a model instance of DialogNodeCollection by calling from_dict on the json representation + dialog_node_collection_model_dict = DialogNodeCollection.from_dict(dialog_node_collection_model_json).__dict__ + dialog_node_collection_model2 = DialogNodeCollection(**dialog_node_collection_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_collection_model == dialog_node_collection_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_collection_model_json2 = dialog_node_collection_model.to_dict() + assert dialog_node_collection_model_json2 == dialog_node_collection_model_json + + +class TestModel_DialogNodeContext: + """ + Test Class for DialogNodeContext + """ + + def test_dialog_node_context_serialization(self): + """ + Test serialization/deserialization for DialogNodeContext + """ + + # Construct a json representation of a DialogNodeContext model + dialog_node_context_model_json = {} + dialog_node_context_model_json['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model_json['foo'] = 'testString' + + # Construct a model instance of DialogNodeContext by calling from_dict on the json representation + dialog_node_context_model = DialogNodeContext.from_dict(dialog_node_context_model_json) + assert dialog_node_context_model != False + + # Construct a model instance of DialogNodeContext by calling from_dict on the json representation + dialog_node_context_model_dict = DialogNodeContext.from_dict(dialog_node_context_model_json).__dict__ + dialog_node_context_model2 = DialogNodeContext(**dialog_node_context_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_context_model == dialog_node_context_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_context_model_json2 = dialog_node_context_model.to_dict() + assert dialog_node_context_model_json2 == dialog_node_context_model_json + + # Test get_properties and set_properties methods. 
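+ # DialogNodeContext allows arbitrary additional properties (the 'foo' key above);
+ # get_properties/set_properties are expected to round-trip those dynamic keys.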
+ dialog_node_context_model.set_properties({}) + actual_dict = dialog_node_context_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + dialog_node_context_model.set_properties(expected_dict) + actual_dict = dialog_node_context_model.get_properties() + assert actual_dict.keys() == expected_dict.keys() + + +class TestModel_DialogNodeNextStep: + """ + Test Class for DialogNodeNextStep + """ + + def test_dialog_node_next_step_serialization(self): + """ + Test serialization/deserialization for DialogNodeNextStep + """ + + # Construct a json representation of a DialogNodeNextStep model + dialog_node_next_step_model_json = {} + dialog_node_next_step_model_json['behavior'] = 'get_user_input' + dialog_node_next_step_model_json['dialog_node'] = 'testString' + dialog_node_next_step_model_json['selector'] = 'condition' + + # Construct a model instance of DialogNodeNextStep by calling from_dict on the json representation + dialog_node_next_step_model = DialogNodeNextStep.from_dict(dialog_node_next_step_model_json) + assert dialog_node_next_step_model != False + + # Construct a model instance of DialogNodeNextStep by calling from_dict on the json representation + dialog_node_next_step_model_dict = DialogNodeNextStep.from_dict(dialog_node_next_step_model_json).__dict__ + dialog_node_next_step_model2 = DialogNodeNextStep(**dialog_node_next_step_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_next_step_model == dialog_node_next_step_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_next_step_model_json2 = dialog_node_next_step_model.to_dict() + assert dialog_node_next_step_model_json2 == dialog_node_next_step_model_json + + +class TestModel_DialogNodeOutput: + """ + Test Class for DialogNodeOutput + """ + + def test_dialog_node_output_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutput + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + dialog_node_output_text_values_element_model = {} # DialogNodeOutputTextValuesElement + dialog_node_output_text_values_element_model['text'] = 'testString' + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + dialog_node_output_generic_model = {} # DialogNodeOutputGenericDialogNodeOutputResponseTypeText + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + dialog_node_output_modifiers_model = {} # DialogNodeOutputModifiers + dialog_node_output_modifiers_model['overwrite'] = True + + # Construct a json representation of a DialogNodeOutput model + dialog_node_output_model_json = {} + dialog_node_output_model_json['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model_json['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model_json['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model_json['foo'] = 'testString' + + # Construct a model instance of DialogNodeOutput by calling from_dict on the json representation + dialog_node_output_model = DialogNodeOutput.from_dict(dialog_node_output_model_json) + assert dialog_node_output_model != False + + # Construct a model instance of DialogNodeOutput by calling from_dict on the json representation + dialog_node_output_model_dict = DialogNodeOutput.from_dict(dialog_node_output_model_json).__dict__ + dialog_node_output_model2 = DialogNodeOutput(**dialog_node_output_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_model == dialog_node_output_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_model_json2 = dialog_node_output_model.to_dict() + assert dialog_node_output_model_json2 == dialog_node_output_model_json + + # Test get_properties and set_properties methods. 
+ dialog_node_output_model.set_properties({}) + actual_dict = dialog_node_output_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + dialog_node_output_model.set_properties(expected_dict) + actual_dict = dialog_node_output_model.get_properties() + assert actual_dict.keys() == expected_dict.keys() + + +class TestModel_DialogNodeOutputConnectToAgentTransferInfo: + """ + Test Class for DialogNodeOutputConnectToAgentTransferInfo + """ + + def test_dialog_node_output_connect_to_agent_transfer_info_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputConnectToAgentTransferInfo + """ + + # Construct a json representation of a DialogNodeOutputConnectToAgentTransferInfo model + dialog_node_output_connect_to_agent_transfer_info_model_json = {} + dialog_node_output_connect_to_agent_transfer_info_model_json['target'] = {'key1': {'anyKey': 'anyValue'}} + + # Construct a model instance of DialogNodeOutputConnectToAgentTransferInfo by calling from_dict on the json representation + dialog_node_output_connect_to_agent_transfer_info_model = DialogNodeOutputConnectToAgentTransferInfo.from_dict(dialog_node_output_connect_to_agent_transfer_info_model_json) + assert dialog_node_output_connect_to_agent_transfer_info_model != False + + # Construct a model instance of DialogNodeOutputConnectToAgentTransferInfo by calling from_dict on the json representation + dialog_node_output_connect_to_agent_transfer_info_model_dict = DialogNodeOutputConnectToAgentTransferInfo.from_dict(dialog_node_output_connect_to_agent_transfer_info_model_json).__dict__ + dialog_node_output_connect_to_agent_transfer_info_model2 = DialogNodeOutputConnectToAgentTransferInfo(**dialog_node_output_connect_to_agent_transfer_info_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_connect_to_agent_transfer_info_model == dialog_node_output_connect_to_agent_transfer_info_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_connect_to_agent_transfer_info_model_json2 = dialog_node_output_connect_to_agent_transfer_info_model.to_dict() + assert dialog_node_output_connect_to_agent_transfer_info_model_json2 == dialog_node_output_connect_to_agent_transfer_info_model_json + + +class TestModel_DialogNodeOutputModifiers: + """ + Test Class for DialogNodeOutputModifiers + """ + + def test_dialog_node_output_modifiers_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputModifiers + """ + + # Construct a json representation of a DialogNodeOutputModifiers model + dialog_node_output_modifiers_model_json = {} + dialog_node_output_modifiers_model_json['overwrite'] = True + + # Construct a model instance of DialogNodeOutputModifiers by calling from_dict on the json representation + dialog_node_output_modifiers_model = DialogNodeOutputModifiers.from_dict(dialog_node_output_modifiers_model_json) + assert dialog_node_output_modifiers_model != False + + # Construct a model instance of DialogNodeOutputModifiers by calling from_dict on the json representation + dialog_node_output_modifiers_model_dict = DialogNodeOutputModifiers.from_dict(dialog_node_output_modifiers_model_json).__dict__ + dialog_node_output_modifiers_model2 = DialogNodeOutputModifiers(**dialog_node_output_modifiers_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_modifiers_model == dialog_node_output_modifiers_model2 + + # Convert model instance back to dict and verify no loss of data + 
dialog_node_output_modifiers_model_json2 = dialog_node_output_modifiers_model.to_dict() + assert dialog_node_output_modifiers_model_json2 == dialog_node_output_modifiers_model_json + + +class TestModel_DialogNodeOutputOptionsElement: + """ + Test Class for DialogNodeOutputOptionsElement + """ + + def test_dialog_node_output_options_element_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputOptionsElement + """ + + # Construct dict forms of any model objects needed in order to build this model. + + message_input_model = {} # MessageInput + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + 
runtime_entity_model['role'] = runtime_entity_role_model + + dialog_node_output_options_element_value_model = {} # DialogNodeOutputOptionsElementValue + dialog_node_output_options_element_value_model['input'] = message_input_model + dialog_node_output_options_element_value_model['intents'] = [runtime_intent_model] + dialog_node_output_options_element_value_model['entities'] = [runtime_entity_model] + + # Construct a json representation of a DialogNodeOutputOptionsElement model + dialog_node_output_options_element_model_json = {} + dialog_node_output_options_element_model_json['label'] = 'testString' + dialog_node_output_options_element_model_json['value'] = dialog_node_output_options_element_value_model + + # Construct a model instance of DialogNodeOutputOptionsElement by calling from_dict on the json representation + dialog_node_output_options_element_model = DialogNodeOutputOptionsElement.from_dict(dialog_node_output_options_element_model_json) + assert dialog_node_output_options_element_model != False + + # Construct a model instance of DialogNodeOutputOptionsElement by calling from_dict on the json representation + dialog_node_output_options_element_model_dict = DialogNodeOutputOptionsElement.from_dict(dialog_node_output_options_element_model_json).__dict__ + dialog_node_output_options_element_model2 = DialogNodeOutputOptionsElement(**dialog_node_output_options_element_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_options_element_model == dialog_node_output_options_element_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_options_element_model_json2 = dialog_node_output_options_element_model.to_dict() + assert dialog_node_output_options_element_model_json2 == dialog_node_output_options_element_model_json + + +class TestModel_DialogNodeOutputOptionsElementValue: + """ + Test Class for DialogNodeOutputOptionsElementValue + """ + + def test_dialog_node_output_options_element_value_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputOptionsElementValue + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_input_model = {} # MessageInput + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + # Construct a json representation of a DialogNodeOutputOptionsElementValue model + dialog_node_output_options_element_value_model_json = {} + dialog_node_output_options_element_value_model_json['input'] = message_input_model + dialog_node_output_options_element_value_model_json['intents'] = [runtime_intent_model] + dialog_node_output_options_element_value_model_json['entities'] = [runtime_entity_model] + + # Construct a model instance of DialogNodeOutputOptionsElementValue by calling 
from_dict on the json representation + dialog_node_output_options_element_value_model = DialogNodeOutputOptionsElementValue.from_dict(dialog_node_output_options_element_value_model_json) + assert dialog_node_output_options_element_value_model != False + + # Construct a model instance of DialogNodeOutputOptionsElementValue by calling from_dict on the json representation + dialog_node_output_options_element_value_model_dict = DialogNodeOutputOptionsElementValue.from_dict(dialog_node_output_options_element_value_model_json).__dict__ + dialog_node_output_options_element_value_model2 = DialogNodeOutputOptionsElementValue(**dialog_node_output_options_element_value_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_options_element_value_model == dialog_node_output_options_element_value_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_options_element_value_model_json2 = dialog_node_output_options_element_value_model.to_dict() + assert dialog_node_output_options_element_value_model_json2 == dialog_node_output_options_element_value_model_json + + +class TestModel_DialogNodeOutputTextValuesElement: + """ + Test Class for DialogNodeOutputTextValuesElement + """ + + def test_dialog_node_output_text_values_element_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputTextValuesElement + """ + + # Construct a json representation of a DialogNodeOutputTextValuesElement model + dialog_node_output_text_values_element_model_json = {} + dialog_node_output_text_values_element_model_json['text'] = 'testString' + + # Construct a model instance of DialogNodeOutputTextValuesElement by calling from_dict on the json representation + dialog_node_output_text_values_element_model = DialogNodeOutputTextValuesElement.from_dict(dialog_node_output_text_values_element_model_json) + assert dialog_node_output_text_values_element_model != False + + # Construct a model instance of DialogNodeOutputTextValuesElement by calling from_dict on the json representation + dialog_node_output_text_values_element_model_dict = DialogNodeOutputTextValuesElement.from_dict(dialog_node_output_text_values_element_model_json).__dict__ + dialog_node_output_text_values_element_model2 = DialogNodeOutputTextValuesElement(**dialog_node_output_text_values_element_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_text_values_element_model == dialog_node_output_text_values_element_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_text_values_element_model_json2 = dialog_node_output_text_values_element_model.to_dict() + assert dialog_node_output_text_values_element_model_json2 == dialog_node_output_text_values_element_model_json + + +class TestModel_DialogNodeVisitedDetails: + """ + Test Class for DialogNodeVisitedDetails + """ + + def test_dialog_node_visited_details_serialization(self): + """ + Test serialization/deserialization for DialogNodeVisitedDetails + """ + + # Construct a json representation of a DialogNodeVisitedDetails model + dialog_node_visited_details_model_json = {} + dialog_node_visited_details_model_json['dialog_node'] = 'testString' + dialog_node_visited_details_model_json['title'] = 'testString' + dialog_node_visited_details_model_json['conditions'] = 'testString' + + # Construct a model instance of DialogNodeVisitedDetails by calling from_dict on the json representation + dialog_node_visited_details_model = 
DialogNodeVisitedDetails.from_dict(dialog_node_visited_details_model_json) + assert dialog_node_visited_details_model != False + + # Construct a model instance of DialogNodeVisitedDetails by calling from_dict on the json representation + dialog_node_visited_details_model_dict = DialogNodeVisitedDetails.from_dict(dialog_node_visited_details_model_json).__dict__ + dialog_node_visited_details_model2 = DialogNodeVisitedDetails(**dialog_node_visited_details_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_visited_details_model == dialog_node_visited_details_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_visited_details_model_json2 = dialog_node_visited_details_model.to_dict() + assert dialog_node_visited_details_model_json2 == dialog_node_visited_details_model_json + + +class TestModel_DialogSuggestion: + """ + Test Class for DialogSuggestion + """ + + def test_dialog_suggestion_serialization(self): + """ + Test serialization/deserialization for DialogSuggestion + """ + + # Construct dict forms of any model objects needed in order to build this model. + + message_input_model = {} # MessageInput + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + 
runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + dialog_suggestion_value_model = {} # DialogSuggestionValue + dialog_suggestion_value_model['input'] = message_input_model + dialog_suggestion_value_model['intents'] = [runtime_intent_model] + dialog_suggestion_value_model['entities'] = [runtime_entity_model] + + # Construct a json representation of a DialogSuggestion model + dialog_suggestion_model_json = {} + dialog_suggestion_model_json['label'] = 'testString' + dialog_suggestion_model_json['value'] = dialog_suggestion_value_model + dialog_suggestion_model_json['output'] = {'anyKey': 'anyValue'} + dialog_suggestion_model_json['dialog_node'] = 'testString' + + # Construct a model instance of DialogSuggestion by calling from_dict on the json representation + dialog_suggestion_model = DialogSuggestion.from_dict(dialog_suggestion_model_json) + assert dialog_suggestion_model != False + + # Construct a model instance of DialogSuggestion by calling from_dict on the json representation + dialog_suggestion_model_dict = DialogSuggestion.from_dict(dialog_suggestion_model_json).__dict__ + dialog_suggestion_model2 = DialogSuggestion(**dialog_suggestion_model_dict) + + # Verify the model instances are equivalent + assert dialog_suggestion_model == dialog_suggestion_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_suggestion_model_json2 = dialog_suggestion_model.to_dict() + assert dialog_suggestion_model_json2 == dialog_suggestion_model_json + + +class TestModel_DialogSuggestionValue: + """ + Test Class for DialogSuggestionValue + """ + + def test_dialog_suggestion_value_serialization(self): + """ + Test serialization/deserialization for DialogSuggestionValue + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_input_model = {} # MessageInput + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + # Construct a json representation of a DialogSuggestionValue model + dialog_suggestion_value_model_json = {} + dialog_suggestion_value_model_json['input'] = message_input_model + dialog_suggestion_value_model_json['intents'] = [runtime_intent_model] + dialog_suggestion_value_model_json['entities'] = [runtime_entity_model] + + # Construct a model instance of DialogSuggestionValue by calling from_dict on the json representation + dialog_suggestion_value_model = 
DialogSuggestionValue.from_dict(dialog_suggestion_value_model_json) + assert dialog_suggestion_value_model != False + + # Construct a model instance of DialogSuggestionValue by calling from_dict on the json representation + dialog_suggestion_value_model_dict = DialogSuggestionValue.from_dict(dialog_suggestion_value_model_json).__dict__ + dialog_suggestion_value_model2 = DialogSuggestionValue(**dialog_suggestion_value_model_dict) + + # Verify the model instances are equivalent + assert dialog_suggestion_value_model == dialog_suggestion_value_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_suggestion_value_model_json2 = dialog_suggestion_value_model.to_dict() + assert dialog_suggestion_value_model_json2 == dialog_suggestion_value_model_json + + +class TestModel_Entity: + """ + Test Class for Entity + """ + + def test_entity_serialization(self): + """ + Test serialization/deserialization for Entity + """ + + # Construct dict forms of any model objects needed in order to build this model. + + value_model = {} # Value + value_model['value'] = 'testString' + value_model['metadata'] = {'anyKey': 'anyValue'} + value_model['type'] = 'synonyms' + value_model['synonyms'] = ['testString'] + value_model['patterns'] = ['testString'] + + # Construct a json representation of a Entity model + entity_model_json = {} + entity_model_json['entity'] = 'testString' + entity_model_json['description'] = 'testString' + entity_model_json['metadata'] = {'anyKey': 'anyValue'} + entity_model_json['fuzzy_match'] = True + entity_model_json['values'] = [value_model] + + # Construct a model instance of Entity by calling from_dict on the json representation + entity_model = Entity.from_dict(entity_model_json) + assert entity_model != False + + # Construct a model instance of Entity by calling from_dict on the json representation + entity_model_dict = Entity.from_dict(entity_model_json).__dict__ + entity_model2 = Entity(**entity_model_dict) + + # Verify the model instances are equivalent + assert entity_model == entity_model2 + + # Convert model instance back to dict and verify no loss of data + entity_model_json2 = entity_model.to_dict() + assert entity_model_json2 == entity_model_json + + +class TestModel_EntityCollection: + """ + Test Class for EntityCollection + """ + + def test_entity_collection_serialization(self): + """ + Test serialization/deserialization for EntityCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + value_model = {} # Value + value_model['value'] = 'testString' + value_model['metadata'] = {'anyKey': 'anyValue'} + value_model['type'] = 'synonyms' + value_model['synonyms'] = ['testString'] + value_model['patterns'] = ['testString'] + + entity_model = {} # Entity + entity_model['entity'] = 'testString' + entity_model['description'] = 'testString' + entity_model['metadata'] = {'anyKey': 'anyValue'} + entity_model['fuzzy_match'] = True + entity_model['values'] = [value_model] + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a EntityCollection model + entity_collection_model_json = {} + entity_collection_model_json['entities'] = [entity_model] + entity_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of EntityCollection by calling from_dict on the json representation + entity_collection_model = EntityCollection.from_dict(entity_collection_model_json) + assert entity_collection_model != False + + # Construct a model instance of EntityCollection by calling from_dict on the json representation + entity_collection_model_dict = EntityCollection.from_dict(entity_collection_model_json).__dict__ + entity_collection_model2 = EntityCollection(**entity_collection_model_dict) + + # Verify the model instances are equivalent + assert entity_collection_model == entity_collection_model2 + + # Convert model instance back to dict and verify no loss of data + entity_collection_model_json2 = entity_collection_model.to_dict() + assert entity_collection_model_json2 == entity_collection_model_json + + +class TestModel_EntityMention: + """ + Test Class for EntityMention + """ + + def test_entity_mention_serialization(self): + """ + Test serialization/deserialization for EntityMention + """ + + # Construct a json representation of a EntityMention model + entity_mention_model_json = {} + entity_mention_model_json['text'] = 'testString' + entity_mention_model_json['intent'] = 'testString' + entity_mention_model_json['location'] = [38] + + # Construct a model instance of EntityMention by calling from_dict on the json representation + entity_mention_model = EntityMention.from_dict(entity_mention_model_json) + assert entity_mention_model != False + + # Construct a model instance of EntityMention by calling from_dict on the json representation + entity_mention_model_dict = EntityMention.from_dict(entity_mention_model_json).__dict__ + entity_mention_model2 = EntityMention(**entity_mention_model_dict) + + # Verify the model instances are equivalent + assert entity_mention_model == entity_mention_model2 + + # Convert model instance back to dict and verify no loss of data + entity_mention_model_json2 = entity_mention_model.to_dict() + assert entity_mention_model_json2 == entity_mention_model_json + + +class TestModel_EntityMentionCollection: + """ + Test Class for EntityMentionCollection + """ + + def test_entity_mention_collection_serialization(self): + """ + Test serialization/deserialization for EntityMentionCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + entity_mention_model = {} # EntityMention + entity_mention_model['text'] = 'testString' + entity_mention_model['intent'] = 'testString' + entity_mention_model['location'] = [38] + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a EntityMentionCollection model + entity_mention_collection_model_json = {} + entity_mention_collection_model_json['examples'] = [entity_mention_model] + entity_mention_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of EntityMentionCollection by calling from_dict on the json representation + entity_mention_collection_model = EntityMentionCollection.from_dict(entity_mention_collection_model_json) + assert entity_mention_collection_model != False + + # Construct a model instance of EntityMentionCollection by calling from_dict on the json representation + entity_mention_collection_model_dict = EntityMentionCollection.from_dict(entity_mention_collection_model_json).__dict__ + entity_mention_collection_model2 = EntityMentionCollection(**entity_mention_collection_model_dict) + + # Verify the model instances are equivalent + assert entity_mention_collection_model == entity_mention_collection_model2 + + # Convert model instance back to dict and verify no loss of data + entity_mention_collection_model_json2 = entity_mention_collection_model.to_dict() + assert entity_mention_collection_model_json2 == entity_mention_collection_model_json + + +class TestModel_Example: + """ + Test Class for Example + """ + + def test_example_serialization(self): + """ + Test serialization/deserialization for Example + """ + + # Construct dict forms of any model objects needed in order to build this model. + + mention_model = {} # Mention + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + # Construct a json representation of a Example model + example_model_json = {} + example_model_json['text'] = 'testString' + example_model_json['mentions'] = [mention_model] + + # Construct a model instance of Example by calling from_dict on the json representation + example_model = Example.from_dict(example_model_json) + assert example_model != False + + # Construct a model instance of Example by calling from_dict on the json representation + example_model_dict = Example.from_dict(example_model_json).__dict__ + example_model2 = Example(**example_model_dict) + + # Verify the model instances are equivalent + assert example_model == example_model2 + + # Convert model instance back to dict and verify no loss of data + example_model_json2 = example_model.to_dict() + assert example_model_json2 == example_model_json + + +class TestModel_ExampleCollection: + """ + Test Class for ExampleCollection + """ + + def test_example_collection_serialization(self): + """ + Test serialization/deserialization for ExampleCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + mention_model = {} # Mention + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + example_model = {} # Example + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a ExampleCollection model + example_collection_model_json = {} + example_collection_model_json['examples'] = [example_model] + example_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of ExampleCollection by calling from_dict on the json representation + example_collection_model = ExampleCollection.from_dict(example_collection_model_json) + assert example_collection_model != False + + # Construct a model instance of ExampleCollection by calling from_dict on the json representation + example_collection_model_dict = ExampleCollection.from_dict(example_collection_model_json).__dict__ + example_collection_model2 = ExampleCollection(**example_collection_model_dict) + + # Verify the model instances are equivalent + assert example_collection_model == example_collection_model2 + + # Convert model instance back to dict and verify no loss of data + example_collection_model_json2 = example_collection_model.to_dict() + assert example_collection_model_json2 == example_collection_model_json + + +class TestModel_Intent: + """ + Test Class for Intent + """ + + def test_intent_serialization(self): + """ + Test serialization/deserialization for Intent + """ + + # Construct dict forms of any model objects needed in order to build this model. + + mention_model = {} # Mention + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + example_model = {} # Example + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + # Construct a json representation of a Intent model + intent_model_json = {} + intent_model_json['intent'] = 'testString' + intent_model_json['description'] = 'testString' + intent_model_json['examples'] = [example_model] + + # Construct a model instance of Intent by calling from_dict on the json representation + intent_model = Intent.from_dict(intent_model_json) + assert intent_model != False + + # Construct a model instance of Intent by calling from_dict on the json representation + intent_model_dict = Intent.from_dict(intent_model_json).__dict__ + intent_model2 = Intent(**intent_model_dict) + + # Verify the model instances are equivalent + assert intent_model == intent_model2 + + # Convert model instance back to dict and verify no loss of data + intent_model_json2 = intent_model.to_dict() + assert intent_model_json2 == intent_model_json + + +class TestModel_IntentCollection: + """ + Test Class for IntentCollection + """ + + def test_intent_collection_serialization(self): + """ + Test serialization/deserialization for IntentCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + mention_model = {} # Mention + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + example_model = {} # Example + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + intent_model = {} # Intent + intent_model['intent'] = 'testString' + intent_model['description'] = 'testString' + intent_model['examples'] = [example_model] + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a IntentCollection model + intent_collection_model_json = {} + intent_collection_model_json['intents'] = [intent_model] + intent_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of IntentCollection by calling from_dict on the json representation + intent_collection_model = IntentCollection.from_dict(intent_collection_model_json) + assert intent_collection_model != False + + # Construct a model instance of IntentCollection by calling from_dict on the json representation + intent_collection_model_dict = IntentCollection.from_dict(intent_collection_model_json).__dict__ + intent_collection_model2 = IntentCollection(**intent_collection_model_dict) + + # Verify the model instances are equivalent + assert intent_collection_model == intent_collection_model2 + + # Convert model instance back to dict and verify no loss of data + intent_collection_model_json2 = intent_collection_model.to_dict() + assert intent_collection_model_json2 == intent_collection_model_json + + +class TestModel_Log: + """ + Test Class for Log + """ + + def test_log_serialization(self): + """ + Test serialization/deserialization for Log + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_input_model = {} # MessageInput + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + message_context_metadata_model = {} # MessageContextMetadata + message_context_metadata_model['deployment'] = 'testString' + message_context_metadata_model['user_id'] = 'testString' + + context_model = {} # Context + context_model['conversation_id'] = 'testString' + context_model['system'] = {'anyKey': 'anyValue'} + context_model['metadata'] = message_context_metadata_model + context_model['foo'] = 'testString' + + dialog_node_visited_details_model = {} # DialogNodeVisitedDetails + 
dialog_node_visited_details_model['dialog_node'] = 'testString' + dialog_node_visited_details_model['title'] = 'testString' + dialog_node_visited_details_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSource + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + log_message_model = {} # LogMessage + log_message_model['level'] = 'info' + log_message_model['msg'] = 'testString' + log_message_model['code'] = 'testString' + log_message_model['source'] = log_message_source_model + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeText + runtime_response_generic_model['response_type'] = 'text' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['channels'] = [response_generic_channel_model] + + output_data_model = {} # OutputData + output_data_model['nodes_visited'] = ['testString'] + output_data_model['nodes_visited_details'] = [dialog_node_visited_details_model] + output_data_model['log_messages'] = [log_message_model] + output_data_model['generic'] = [runtime_response_generic_model] + output_data_model['foo'] = 'testString' + + message_request_model = {} # MessageRequest + message_request_model['input'] = message_input_model + message_request_model['intents'] = [runtime_intent_model] + message_request_model['entities'] = [runtime_entity_model] + message_request_model['alternate_intents'] = False + message_request_model['context'] = context_model + message_request_model['output'] = output_data_model + message_request_model['user_id'] = 'testString' + + message_response_model = {} # MessageResponse + message_response_model['input'] = message_input_model + message_response_model['intents'] = [runtime_intent_model] + message_response_model['entities'] = [runtime_entity_model] + message_response_model['alternate_intents'] = False + message_response_model['context'] = context_model + message_response_model['output'] = output_data_model + message_response_model['user_id'] = 'testString' + + # Construct a json representation of a Log model + log_model_json = {} + log_model_json['request'] = message_request_model + log_model_json['response'] = message_response_model + log_model_json['log_id'] = 'testString' + log_model_json['request_timestamp'] = 'testString' + log_model_json['response_timestamp'] = 'testString' + log_model_json['workspace_id'] = 'testString' + log_model_json['language'] = 'testString' + + # Construct a model instance of Log by calling from_dict on the json representation + log_model = Log.from_dict(log_model_json) + assert log_model != False + + # Construct a model instance of Log by calling from_dict on the json representation + log_model_dict = Log.from_dict(log_model_json).__dict__ + log_model2 = Log(**log_model_dict) + + # Verify the model instances are equivalent + assert log_model == log_model2 + + # Convert model instance back to dict and verify no loss of data + log_model_json2 = log_model.to_dict() + assert log_model_json2 == log_model_json + + +class TestModel_LogCollection: + """ + Test Class for LogCollection + """ + + def test_log_collection_serialization(self): + """ + Test serialization/deserialization for LogCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_input_model = {} # MessageInput + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + message_context_metadata_model = {} # MessageContextMetadata + message_context_metadata_model['deployment'] = 'testString' + message_context_metadata_model['user_id'] = 'testString' + + context_model = {} # Context + context_model['conversation_id'] = 'testString' + context_model['system'] = {'anyKey': 'anyValue'} + context_model['metadata'] = message_context_metadata_model + context_model['foo'] = 'testString' + + dialog_node_visited_details_model = {} # DialogNodeVisitedDetails + 
dialog_node_visited_details_model['dialog_node'] = 'testString' + dialog_node_visited_details_model['title'] = 'testString' + dialog_node_visited_details_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSource + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + log_message_model = {} # LogMessage + log_message_model['level'] = 'info' + log_message_model['msg'] = 'testString' + log_message_model['code'] = 'testString' + log_message_model['source'] = log_message_source_model + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeText + runtime_response_generic_model['response_type'] = 'text' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['channels'] = [response_generic_channel_model] + + output_data_model = {} # OutputData + output_data_model['nodes_visited'] = ['testString'] + output_data_model['nodes_visited_details'] = [dialog_node_visited_details_model] + output_data_model['log_messages'] = [log_message_model] + output_data_model['generic'] = [runtime_response_generic_model] + output_data_model['foo'] = 'testString' + + message_request_model = {} # MessageRequest + message_request_model['input'] = message_input_model + message_request_model['intents'] = [runtime_intent_model] + message_request_model['entities'] = [runtime_entity_model] + message_request_model['alternate_intents'] = False + message_request_model['context'] = context_model + message_request_model['output'] = output_data_model + message_request_model['user_id'] = 'testString' + + message_response_model = {} # MessageResponse + message_response_model['input'] = message_input_model + message_response_model['intents'] = [runtime_intent_model] + message_response_model['entities'] = [runtime_entity_model] + message_response_model['alternate_intents'] = False + message_response_model['context'] = context_model + message_response_model['output'] = output_data_model + message_response_model['user_id'] = 'testString' + + log_model = {} # Log + log_model['request'] = message_request_model + log_model['response'] = message_response_model + log_model['log_id'] = 'testString' + log_model['request_timestamp'] = 'testString' + log_model['response_timestamp'] = 'testString' + log_model['workspace_id'] = 'testString' + log_model['language'] = 'testString' + + log_pagination_model = {} # LogPagination + log_pagination_model['next_url'] = 'testString' + log_pagination_model['matched'] = 38 + log_pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a LogCollection model + log_collection_model_json = {} + log_collection_model_json['logs'] = [log_model] + log_collection_model_json['pagination'] = log_pagination_model + + # Construct a model instance of LogCollection by calling from_dict on the json representation + log_collection_model = LogCollection.from_dict(log_collection_model_json) + assert log_collection_model != False + + # Construct a model instance of LogCollection by calling from_dict on the json representation + log_collection_model_dict = LogCollection.from_dict(log_collection_model_json).__dict__ + log_collection_model2 = LogCollection(**log_collection_model_dict) + + # Verify the model instances are equivalent + assert log_collection_model == log_collection_model2 + + # Convert model instance back to dict and verify no 
loss of data + log_collection_model_json2 = log_collection_model.to_dict() + assert log_collection_model_json2 == log_collection_model_json + + +class TestModel_LogMessage: + """ + Test Class for LogMessage + """ + + def test_log_message_serialization(self): + """ + Test serialization/deserialization for LogMessage + """ + + # Construct dict forms of any model objects needed in order to build this model. + + log_message_source_model = {} # LogMessageSource + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + # Construct a json representation of a LogMessage model + log_message_model_json = {} + log_message_model_json['level'] = 'info' + log_message_model_json['msg'] = 'testString' + log_message_model_json['code'] = 'testString' + log_message_model_json['source'] = log_message_source_model + + # Construct a model instance of LogMessage by calling from_dict on the json representation + log_message_model = LogMessage.from_dict(log_message_model_json) + assert log_message_model != False + + # Construct a model instance of LogMessage by calling from_dict on the json representation + log_message_model_dict = LogMessage.from_dict(log_message_model_json).__dict__ + log_message_model2 = LogMessage(**log_message_model_dict) + + # Verify the model instances are equivalent + assert log_message_model == log_message_model2 + + # Convert model instance back to dict and verify no loss of data + log_message_model_json2 = log_message_model.to_dict() + assert log_message_model_json2 == log_message_model_json + + +class TestModel_LogMessageSource: + """ + Test Class for LogMessageSource + """ + + def test_log_message_source_serialization(self): + """ + Test serialization/deserialization for LogMessageSource + """ + + # Construct a json representation of a LogMessageSource model + log_message_source_model_json = {} + log_message_source_model_json['type'] = 'dialog_node' + log_message_source_model_json['dialog_node'] = 'testString' + + # Construct a model instance of LogMessageSource by calling from_dict on the json representation + log_message_source_model = LogMessageSource.from_dict(log_message_source_model_json) + assert log_message_source_model != False + + # Construct a model instance of LogMessageSource by calling from_dict on the json representation + log_message_source_model_dict = LogMessageSource.from_dict(log_message_source_model_json).__dict__ + log_message_source_model2 = LogMessageSource(**log_message_source_model_dict) + + # Verify the model instances are equivalent + assert log_message_source_model == log_message_source_model2 + + # Convert model instance back to dict and verify no loss of data + log_message_source_model_json2 = log_message_source_model.to_dict() + assert log_message_source_model_json2 == log_message_source_model_json + + +class TestModel_LogPagination: + """ + Test Class for LogPagination + """ + + def test_log_pagination_serialization(self): + """ + Test serialization/deserialization for LogPagination + """ + + # Construct a json representation of a LogPagination model + log_pagination_model_json = {} + log_pagination_model_json['next_url'] = 'testString' + log_pagination_model_json['matched'] = 38 + log_pagination_model_json['next_cursor'] = 'testString' + + # Construct a model instance of LogPagination by calling from_dict on the json representation + log_pagination_model = LogPagination.from_dict(log_pagination_model_json) + assert log_pagination_model != False + + # Construct a model instance of LogPagination by 
calling from_dict on the json representation + log_pagination_model_dict = LogPagination.from_dict(log_pagination_model_json).__dict__ + log_pagination_model2 = LogPagination(**log_pagination_model_dict) + + # Verify the model instances are equivalent + assert log_pagination_model == log_pagination_model2 + + # Convert model instance back to dict and verify no loss of data + log_pagination_model_json2 = log_pagination_model.to_dict() + assert log_pagination_model_json2 == log_pagination_model_json + + +class TestModel_Mention: + """ + Test Class for Mention + """ + + def test_mention_serialization(self): + """ + Test serialization/deserialization for Mention + """ + + # Construct a json representation of a Mention model + mention_model_json = {} + mention_model_json['entity'] = 'testString' + mention_model_json['location'] = [38] + + # Construct a model instance of Mention by calling from_dict on the json representation + mention_model = Mention.from_dict(mention_model_json) + assert mention_model != False + + # Construct a model instance of Mention by calling from_dict on the json representation + mention_model_dict = Mention.from_dict(mention_model_json).__dict__ + mention_model2 = Mention(**mention_model_dict) + + # Verify the model instances are equivalent + assert mention_model == mention_model2 + + # Convert model instance back to dict and verify no loss of data + mention_model_json2 = mention_model.to_dict() + assert mention_model_json2 == mention_model_json + + +class TestModel_MessageContextMetadata: + """ + Test Class for MessageContextMetadata + """ + + def test_message_context_metadata_serialization(self): + """ + Test serialization/deserialization for MessageContextMetadata + """ + + # Construct a json representation of a MessageContextMetadata model + message_context_metadata_model_json = {} + message_context_metadata_model_json['deployment'] = 'testString' + message_context_metadata_model_json['user_id'] = 'testString' + + # Construct a model instance of MessageContextMetadata by calling from_dict on the json representation + message_context_metadata_model = MessageContextMetadata.from_dict(message_context_metadata_model_json) + assert message_context_metadata_model != False + + # Construct a model instance of MessageContextMetadata by calling from_dict on the json representation + message_context_metadata_model_dict = MessageContextMetadata.from_dict(message_context_metadata_model_json).__dict__ + message_context_metadata_model2 = MessageContextMetadata(**message_context_metadata_model_dict) + + # Verify the model instances are equivalent + assert message_context_metadata_model == message_context_metadata_model2 + + # Convert model instance back to dict and verify no loss of data + message_context_metadata_model_json2 = message_context_metadata_model.to_dict() + assert message_context_metadata_model_json2 == message_context_metadata_model_json + + +class TestModel_MessageInput: + """ + Test Class for MessageInput + """ + + def test_message_input_serialization(self): + """ + Test serialization/deserialization for MessageInput + """ + + # Construct a json representation of a MessageInput model + message_input_model_json = {} + message_input_model_json['text'] = 'testString' + message_input_model_json['spelling_suggestions'] = False + message_input_model_json['spelling_auto_correct'] = False + message_input_model_json['foo'] = 'testString' + + # Construct a model instance of MessageInput by calling from_dict on the json representation + message_input_model = 
MessageInput.from_dict(message_input_model_json) + assert message_input_model != False + + # Construct a model instance of MessageInput by calling from_dict on the json representation + message_input_model_dict = MessageInput.from_dict(message_input_model_json).__dict__ + message_input_model2 = MessageInput(**message_input_model_dict) + + # Verify the model instances are equivalent + assert message_input_model == message_input_model2 + + # Convert model instance back to dict and verify no loss of data + message_input_model_json2 = message_input_model.to_dict() + assert message_input_model_json2 == message_input_model_json + + # Test get_properties and set_properties methods. + message_input_model.set_properties({}) + actual_dict = message_input_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + message_input_model.set_properties(expected_dict) + actual_dict = message_input_model.get_properties() + assert actual_dict.keys() == expected_dict.keys() + + +class TestModel_MessageRequest: + """ + Test Class for MessageRequest + """ + + def test_message_request_serialization(self): + """ + Test serialization/deserialization for MessageRequest + """ + + # Construct dict forms of any model objects needed in order to build this model. + + message_input_model = {} # MessageInput + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 
'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + message_context_metadata_model = {} # MessageContextMetadata + message_context_metadata_model['deployment'] = 'testString' + message_context_metadata_model['user_id'] = 'testString' + + context_model = {} # Context + context_model['conversation_id'] = 'testString' + context_model['system'] = {'anyKey': 'anyValue'} + context_model['metadata'] = message_context_metadata_model + context_model['foo'] = 'testString' + + dialog_node_visited_details_model = {} # DialogNodeVisitedDetails + dialog_node_visited_details_model['dialog_node'] = 'testString' + dialog_node_visited_details_model['title'] = 'testString' + dialog_node_visited_details_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSource + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + log_message_model = {} # LogMessage + log_message_model['level'] = 'info' + log_message_model['msg'] = 'testString' + log_message_model['code'] = 'testString' + log_message_model['source'] = log_message_source_model + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeText + runtime_response_generic_model['response_type'] = 'text' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['channels'] = [response_generic_channel_model] + + output_data_model = {} # OutputData + output_data_model['nodes_visited'] = ['testString'] + output_data_model['nodes_visited_details'] = [dialog_node_visited_details_model] + output_data_model['log_messages'] = [log_message_model] + output_data_model['generic'] = [runtime_response_generic_model] + output_data_model['foo'] = 'testString' + + # Construct a json representation of a MessageRequest model + message_request_model_json = {} + message_request_model_json['input'] = message_input_model + message_request_model_json['intents'] = [runtime_intent_model] + message_request_model_json['entities'] = [runtime_entity_model] + message_request_model_json['alternate_intents'] = False + message_request_model_json['context'] = context_model + message_request_model_json['output'] = output_data_model + message_request_model_json['user_id'] = 'testString' + + # Construct a model instance of MessageRequest by calling from_dict on the json representation + message_request_model = MessageRequest.from_dict(message_request_model_json) + assert message_request_model != False + + # Construct a model instance of MessageRequest by calling from_dict on the json representation + message_request_model_dict = MessageRequest.from_dict(message_request_model_json).__dict__ + 
message_request_model2 = MessageRequest(**message_request_model_dict) + + # Verify the model instances are equivalent + assert message_request_model == message_request_model2 + + # Convert model instance back to dict and verify no loss of data + message_request_model_json2 = message_request_model.to_dict() + assert message_request_model_json2 == message_request_model_json + + +class TestModel_MessageResponse: + """ + Test Class for MessageResponse + """ + + def test_message_response_serialization(self): + """ + Test serialization/deserialization for MessageResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. + + message_input_model = {} # MessageInput + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = 
runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + message_context_metadata_model = {} # MessageContextMetadata + message_context_metadata_model['deployment'] = 'testString' + message_context_metadata_model['user_id'] = 'testString' + + context_model = {} # Context + context_model['conversation_id'] = 'testString' + context_model['system'] = {'anyKey': 'anyValue'} + context_model['metadata'] = message_context_metadata_model + context_model['foo'] = 'testString' + + dialog_node_visited_details_model = {} # DialogNodeVisitedDetails + dialog_node_visited_details_model['dialog_node'] = 'testString' + dialog_node_visited_details_model['title'] = 'testString' + dialog_node_visited_details_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSource + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + log_message_model = {} # LogMessage + log_message_model['level'] = 'info' + log_message_model['msg'] = 'testString' + log_message_model['code'] = 'testString' + log_message_model['source'] = log_message_source_model + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeText + runtime_response_generic_model['response_type'] = 'text' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['channels'] = [response_generic_channel_model] + + output_data_model = {} # OutputData + output_data_model['nodes_visited'] = ['testString'] + output_data_model['nodes_visited_details'] = [dialog_node_visited_details_model] + output_data_model['log_messages'] = [log_message_model] + output_data_model['generic'] = [runtime_response_generic_model] + output_data_model['foo'] = 'testString' + + # Construct a json representation of a MessageResponse model + message_response_model_json = {} + message_response_model_json['input'] = message_input_model + message_response_model_json['intents'] = [runtime_intent_model] + message_response_model_json['entities'] = [runtime_entity_model] + message_response_model_json['alternate_intents'] = False + message_response_model_json['context'] = context_model + message_response_model_json['output'] = output_data_model + message_response_model_json['user_id'] = 'testString' + + # Construct a model instance of MessageResponse by calling from_dict on the json representation + message_response_model = MessageResponse.from_dict(message_response_model_json) + assert message_response_model != False + + # Construct a model instance of MessageResponse by calling from_dict on the json representation + message_response_model_dict = MessageResponse.from_dict(message_response_model_json).__dict__ + message_response_model2 = MessageResponse(**message_response_model_dict) + + # Verify the model instances are equivalent + assert message_response_model == message_response_model2 + + # Convert model instance back to dict and verify no loss of data + message_response_model_json2 = message_response_model.to_dict() + assert message_response_model_json2 == message_response_model_json + + +class TestModel_OutputData: + """ + Test Class for OutputData + """ + + def test_output_data_serialization(self): + """ + Test serialization/deserialization for OutputData + """ + + # Construct dict forms of any model objects needed in order 
to build this model. + + dialog_node_visited_details_model = {} # DialogNodeVisitedDetails + dialog_node_visited_details_model['dialog_node'] = 'testString' + dialog_node_visited_details_model['title'] = 'testString' + dialog_node_visited_details_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSource + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + log_message_model = {} # LogMessage + log_message_model['level'] = 'info' + log_message_model['msg'] = 'testString' + log_message_model['code'] = 'testString' + log_message_model['source'] = log_message_source_model + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeText + runtime_response_generic_model['response_type'] = 'text' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['channels'] = [response_generic_channel_model] + + # Construct a json representation of a OutputData model + output_data_model_json = {} + output_data_model_json['nodes_visited'] = ['testString'] + output_data_model_json['nodes_visited_details'] = [dialog_node_visited_details_model] + output_data_model_json['log_messages'] = [log_message_model] + output_data_model_json['generic'] = [runtime_response_generic_model] + output_data_model_json['foo'] = 'testString' + + # Construct a model instance of OutputData by calling from_dict on the json representation + output_data_model = OutputData.from_dict(output_data_model_json) + assert output_data_model != False + + # Construct a model instance of OutputData by calling from_dict on the json representation + output_data_model_dict = OutputData.from_dict(output_data_model_json).__dict__ + output_data_model2 = OutputData(**output_data_model_dict) + + # Verify the model instances are equivalent + assert output_data_model == output_data_model2 + + # Convert model instance back to dict and verify no loss of data + output_data_model_json2 = output_data_model.to_dict() + assert output_data_model_json2 == output_data_model_json + + # Test get_properties and set_properties methods. 
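For readers skimming these generated tests: OutputData (like MessageInput, Context, DialogNodeOutput, and WorkspaceSystemSettings) supports additional properties, so JSON keys that are not declared attributes are kept on the instance and managed through get_properties()/set_properties(), which is what the assertions below verify. A minimal illustrative sketch of that behaviour follows; the field values and the 'custom_flag'/'other_flag' keys are placeholders, not taken from this test.

from ibm_watson.assistant_v1 import OutputData

# Build the model from JSON that carries a key OutputData does not declare.
output = OutputData.from_dict({
    'nodes_visited': [],
    'nodes_visited_details': [],
    'log_messages': [],
    'generic': [],
    'custom_flag': 'enabled',   # undeclared key, kept as a dynamic property
})

print(output.get_properties())                # {'custom_flag': 'enabled'}
output.set_properties({'other_flag': 'off'})  # replaces, rather than merges, the dynamic properties
print(output.get_properties())                # {'other_flag': 'off'}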
+ output_data_model.set_properties({}) + actual_dict = output_data_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + output_data_model.set_properties(expected_dict) + actual_dict = output_data_model.get_properties() + assert actual_dict.keys() == expected_dict.keys() + + +class TestModel_Pagination: + """ + Test Class for Pagination + """ + + def test_pagination_serialization(self): + """ + Test serialization/deserialization for Pagination + """ + + # Construct a json representation of a Pagination model + pagination_model_json = {} + pagination_model_json['refresh_url'] = 'testString' + pagination_model_json['next_url'] = 'testString' + pagination_model_json['total'] = 38 + pagination_model_json['matched'] = 38 + pagination_model_json['refresh_cursor'] = 'testString' + pagination_model_json['next_cursor'] = 'testString' + + # Construct a model instance of Pagination by calling from_dict on the json representation + pagination_model = Pagination.from_dict(pagination_model_json) + assert pagination_model != False + + # Construct a model instance of Pagination by calling from_dict on the json representation + pagination_model_dict = Pagination.from_dict(pagination_model_json).__dict__ + pagination_model2 = Pagination(**pagination_model_dict) + + # Verify the model instances are equivalent + assert pagination_model == pagination_model2 + + # Convert model instance back to dict and verify no loss of data + pagination_model_json2 = pagination_model.to_dict() + assert pagination_model_json2 == pagination_model_json + + +class TestModel_ResponseGenericChannel: + """ + Test Class for ResponseGenericChannel + """ + + def test_response_generic_channel_serialization(self): + """ + Test serialization/deserialization for ResponseGenericChannel + """ + + # Construct a json representation of a ResponseGenericChannel model + response_generic_channel_model_json = {} + response_generic_channel_model_json['channel'] = 'chat' + + # Construct a model instance of ResponseGenericChannel by calling from_dict on the json representation + response_generic_channel_model = ResponseGenericChannel.from_dict(response_generic_channel_model_json) + assert response_generic_channel_model != False + + # Construct a model instance of ResponseGenericChannel by calling from_dict on the json representation + response_generic_channel_model_dict = ResponseGenericChannel.from_dict(response_generic_channel_model_json).__dict__ + response_generic_channel_model2 = ResponseGenericChannel(**response_generic_channel_model_dict) + + # Verify the model instances are equivalent + assert response_generic_channel_model == response_generic_channel_model2 + + # Convert model instance back to dict and verify no loss of data + response_generic_channel_model_json2 = response_generic_channel_model.to_dict() + assert response_generic_channel_model_json2 == response_generic_channel_model_json + + +class TestModel_RuntimeEntity: + """ + Test Class for RuntimeEntity + """ + + def test_runtime_entity_serialization(self): + """ + Test serialization/deserialization for RuntimeEntity + """ + + # Construct dict forms of any model objects needed in order to build this model. 
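Every test in this file exercises the same round trip: build a plain dict, promote it to a typed model with from_dict, and confirm that to_dict reproduces the original JSON. A compact sketch of that round trip for RuntimeEntity is shown below; the entity name, location, and values are placeholders, and the nested-group handling reflects how the generated models are expected to behave rather than anything this test asserts line by line.

from ibm_watson.assistant_v1 import RuntimeEntity

entity_json = {
    'entity': 'appointment_date',                      # placeholder entity name
    'location': [0, 10],
    'value': '2022-06-01',
    'groups': [{'group': 'g0', 'location': [0, 10]}],  # placeholder capture group
}

entity = RuntimeEntity.from_dict(entity_json)

# Nested dicts should come back as typed sub-models (here a CaptureGroup) ...
print(entity.groups[0].group)        # 'g0'
# ... and serializing again should lose no data.
assert entity.to_dict() == entity_json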
+ + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + # Construct a json representation of a RuntimeEntity model + runtime_entity_model_json = {} + runtime_entity_model_json['entity'] = 'testString' + runtime_entity_model_json['location'] = [38] + runtime_entity_model_json['value'] = 'testString' + runtime_entity_model_json['confidence'] = 72.5 + runtime_entity_model_json['groups'] = [capture_group_model] + runtime_entity_model_json['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model_json['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model_json['role'] = runtime_entity_role_model + + # Construct a model instance of RuntimeEntity by calling from_dict on the json representation + runtime_entity_model = RuntimeEntity.from_dict(runtime_entity_model_json) + assert runtime_entity_model != False + + # Construct a model instance of RuntimeEntity by calling from_dict on the json representation + runtime_entity_model_dict = RuntimeEntity.from_dict(runtime_entity_model_json).__dict__ + runtime_entity_model2 = RuntimeEntity(**runtime_entity_model_dict) + + # Verify the model instances are equivalent + assert runtime_entity_model == runtime_entity_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_entity_model_json2 = runtime_entity_model.to_dict() + assert runtime_entity_model_json2 == runtime_entity_model_json + + +class 
TestModel_RuntimeEntityAlternative: + """ + Test Class for RuntimeEntityAlternative + """ + + def test_runtime_entity_alternative_serialization(self): + """ + Test serialization/deserialization for RuntimeEntityAlternative + """ + + # Construct a json representation of a RuntimeEntityAlternative model + runtime_entity_alternative_model_json = {} + runtime_entity_alternative_model_json['value'] = 'testString' + runtime_entity_alternative_model_json['confidence'] = 72.5 + + # Construct a model instance of RuntimeEntityAlternative by calling from_dict on the json representation + runtime_entity_alternative_model = RuntimeEntityAlternative.from_dict(runtime_entity_alternative_model_json) + assert runtime_entity_alternative_model != False + + # Construct a model instance of RuntimeEntityAlternative by calling from_dict on the json representation + runtime_entity_alternative_model_dict = RuntimeEntityAlternative.from_dict(runtime_entity_alternative_model_json).__dict__ + runtime_entity_alternative_model2 = RuntimeEntityAlternative(**runtime_entity_alternative_model_dict) + + # Verify the model instances are equivalent + assert runtime_entity_alternative_model == runtime_entity_alternative_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_entity_alternative_model_json2 = runtime_entity_alternative_model.to_dict() + assert runtime_entity_alternative_model_json2 == runtime_entity_alternative_model_json + + +class TestModel_RuntimeEntityInterpretation: + """ + Test Class for RuntimeEntityInterpretation + """ + + def test_runtime_entity_interpretation_serialization(self): + """ + Test serialization/deserialization for RuntimeEntityInterpretation + """ + + # Construct a json representation of a RuntimeEntityInterpretation model + runtime_entity_interpretation_model_json = {} + runtime_entity_interpretation_model_json['calendar_type'] = 'testString' + runtime_entity_interpretation_model_json['datetime_link'] = 'testString' + runtime_entity_interpretation_model_json['festival'] = 'testString' + runtime_entity_interpretation_model_json['granularity'] = 'day' + runtime_entity_interpretation_model_json['range_link'] = 'testString' + runtime_entity_interpretation_model_json['range_modifier'] = 'testString' + runtime_entity_interpretation_model_json['relative_day'] = 72.5 + runtime_entity_interpretation_model_json['relative_month'] = 72.5 + runtime_entity_interpretation_model_json['relative_week'] = 72.5 + runtime_entity_interpretation_model_json['relative_weekend'] = 72.5 + runtime_entity_interpretation_model_json['relative_year'] = 72.5 + runtime_entity_interpretation_model_json['specific_day'] = 72.5 + runtime_entity_interpretation_model_json['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model_json['specific_month'] = 72.5 + runtime_entity_interpretation_model_json['specific_quarter'] = 72.5 + runtime_entity_interpretation_model_json['specific_year'] = 72.5 + runtime_entity_interpretation_model_json['numeric_value'] = 72.5 + runtime_entity_interpretation_model_json['subtype'] = 'testString' + runtime_entity_interpretation_model_json['part_of_day'] = 'testString' + runtime_entity_interpretation_model_json['relative_hour'] = 72.5 + runtime_entity_interpretation_model_json['relative_minute'] = 72.5 + runtime_entity_interpretation_model_json['relative_second'] = 72.5 + runtime_entity_interpretation_model_json['specific_hour'] = 72.5 + runtime_entity_interpretation_model_json['specific_minute'] = 72.5 + 
runtime_entity_interpretation_model_json['specific_second'] = 72.5 + runtime_entity_interpretation_model_json['timezone'] = 'testString' + + # Construct a model instance of RuntimeEntityInterpretation by calling from_dict on the json representation + runtime_entity_interpretation_model = RuntimeEntityInterpretation.from_dict(runtime_entity_interpretation_model_json) + assert runtime_entity_interpretation_model != False + + # Construct a model instance of RuntimeEntityInterpretation by calling from_dict on the json representation + runtime_entity_interpretation_model_dict = RuntimeEntityInterpretation.from_dict(runtime_entity_interpretation_model_json).__dict__ + runtime_entity_interpretation_model2 = RuntimeEntityInterpretation(**runtime_entity_interpretation_model_dict) + + # Verify the model instances are equivalent + assert runtime_entity_interpretation_model == runtime_entity_interpretation_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_entity_interpretation_model_json2 = runtime_entity_interpretation_model.to_dict() + assert runtime_entity_interpretation_model_json2 == runtime_entity_interpretation_model_json + + +class TestModel_RuntimeEntityRole: + """ + Test Class for RuntimeEntityRole + """ + + def test_runtime_entity_role_serialization(self): + """ + Test serialization/deserialization for RuntimeEntityRole + """ + + # Construct a json representation of a RuntimeEntityRole model + runtime_entity_role_model_json = {} + runtime_entity_role_model_json['type'] = 'date_from' + + # Construct a model instance of RuntimeEntityRole by calling from_dict on the json representation + runtime_entity_role_model = RuntimeEntityRole.from_dict(runtime_entity_role_model_json) + assert runtime_entity_role_model != False + + # Construct a model instance of RuntimeEntityRole by calling from_dict on the json representation + runtime_entity_role_model_dict = RuntimeEntityRole.from_dict(runtime_entity_role_model_json).__dict__ + runtime_entity_role_model2 = RuntimeEntityRole(**runtime_entity_role_model_dict) + + # Verify the model instances are equivalent + assert runtime_entity_role_model == runtime_entity_role_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_entity_role_model_json2 = runtime_entity_role_model.to_dict() + assert runtime_entity_role_model_json2 == runtime_entity_role_model_json + + +class TestModel_RuntimeIntent: + """ + Test Class for RuntimeIntent + """ + + def test_runtime_intent_serialization(self): + """ + Test serialization/deserialization for RuntimeIntent + """ + + # Construct a json representation of a RuntimeIntent model + runtime_intent_model_json = {} + runtime_intent_model_json['intent'] = 'testString' + runtime_intent_model_json['confidence'] = 72.5 + + # Construct a model instance of RuntimeIntent by calling from_dict on the json representation + runtime_intent_model = RuntimeIntent.from_dict(runtime_intent_model_json) + assert runtime_intent_model != False + + # Construct a model instance of RuntimeIntent by calling from_dict on the json representation + runtime_intent_model_dict = RuntimeIntent.from_dict(runtime_intent_model_json).__dict__ + runtime_intent_model2 = RuntimeIntent(**runtime_intent_model_dict) + + # Verify the model instances are equivalent + assert runtime_intent_model == runtime_intent_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_intent_model_json2 = runtime_intent_model.to_dict() + assert runtime_intent_model_json2 == 
runtime_intent_model_json + + +class TestModel_StatusError: + """ + Test Class for StatusError + """ + + def test_status_error_serialization(self): + """ + Test serialization/deserialization for StatusError + """ + + # Construct a json representation of a StatusError model + status_error_model_json = {} + status_error_model_json['message'] = 'testString' + + # Construct a model instance of StatusError by calling from_dict on the json representation + status_error_model = StatusError.from_dict(status_error_model_json) + assert status_error_model != False + + # Construct a model instance of StatusError by calling from_dict on the json representation + status_error_model_dict = StatusError.from_dict(status_error_model_json).__dict__ + status_error_model2 = StatusError(**status_error_model_dict) + + # Verify the model instances are equivalent + assert status_error_model == status_error_model2 + + # Convert model instance back to dict and verify no loss of data + status_error_model_json2 = status_error_model.to_dict() + assert status_error_model_json2 == status_error_model_json + + +class TestModel_Synonym: + """ + Test Class for Synonym + """ + + def test_synonym_serialization(self): + """ + Test serialization/deserialization for Synonym + """ + + # Construct a json representation of a Synonym model + synonym_model_json = {} + synonym_model_json['synonym'] = 'testString' + + # Construct a model instance of Synonym by calling from_dict on the json representation + synonym_model = Synonym.from_dict(synonym_model_json) + assert synonym_model != False + + # Construct a model instance of Synonym by calling from_dict on the json representation + synonym_model_dict = Synonym.from_dict(synonym_model_json).__dict__ + synonym_model2 = Synonym(**synonym_model_dict) + + # Verify the model instances are equivalent + assert synonym_model == synonym_model2 + + # Convert model instance back to dict and verify no loss of data + synonym_model_json2 = synonym_model.to_dict() + assert synonym_model_json2 == synonym_model_json + + +class TestModel_SynonymCollection: + """ + Test Class for SynonymCollection + """ + + def test_synonym_collection_serialization(self): + """ + Test serialization/deserialization for SynonymCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
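SynonymCollection, like the other *Collection models in this file, pairs a page of results with a Pagination record; next_cursor (or next_url) is what a caller feeds back, for example as the cursor argument of list_synonyms, to fetch the following page. A small illustrative sketch with placeholder URLs and values:

from ibm_watson.assistant_v1 import SynonymCollection

collection = SynonymCollection.from_dict({
    'synonyms': [{'synonym': 'check-up'}, {'synonym': 'exam'}],
    'pagination': {
        'refresh_url': '/v1/workspaces/{id}/entities/{entity}/values/{value}/synonyms',
        'next_url': '/v1/workspaces/{id}/entities/{entity}/values/{value}/synonyms?cursor=abc',
        'next_cursor': 'abc',
    },
})

print([s.synonym for s in collection.synonyms])   # ['check-up', 'exam']
if collection.pagination.next_cursor:             # populated only when another page exists
    pass  # request the next page with cursor=collection.pagination.next_cursor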
+ + synonym_model = {} # Synonym + synonym_model['synonym'] = 'testString' + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a SynonymCollection model + synonym_collection_model_json = {} + synonym_collection_model_json['synonyms'] = [synonym_model] + synonym_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of SynonymCollection by calling from_dict on the json representation + synonym_collection_model = SynonymCollection.from_dict(synonym_collection_model_json) + assert synonym_collection_model != False + + # Construct a model instance of SynonymCollection by calling from_dict on the json representation + synonym_collection_model_dict = SynonymCollection.from_dict(synonym_collection_model_json).__dict__ + synonym_collection_model2 = SynonymCollection(**synonym_collection_model_dict) + + # Verify the model instances are equivalent + assert synonym_collection_model == synonym_collection_model2 + + # Convert model instance back to dict and verify no loss of data + synonym_collection_model_json2 = synonym_collection_model.to_dict() + assert synonym_collection_model_json2 == synonym_collection_model_json + + +class TestModel_Value: + """ + Test Class for Value + """ + + def test_value_serialization(self): + """ + Test serialization/deserialization for Value + """ + + # Construct a json representation of a Value model + value_model_json = {} + value_model_json['value'] = 'testString' + value_model_json['metadata'] = {'anyKey': 'anyValue'} + value_model_json['type'] = 'synonyms' + value_model_json['synonyms'] = ['testString'] + value_model_json['patterns'] = ['testString'] + + # Construct a model instance of Value by calling from_dict on the json representation + value_model = Value.from_dict(value_model_json) + assert value_model != False + + # Construct a model instance of Value by calling from_dict on the json representation + value_model_dict = Value.from_dict(value_model_json).__dict__ + value_model2 = Value(**value_model_dict) + + # Verify the model instances are equivalent + assert value_model == value_model2 + + # Convert model instance back to dict and verify no loss of data + value_model_json2 = value_model.to_dict() + assert value_model_json2 == value_model_json + + +class TestModel_ValueCollection: + """ + Test Class for ValueCollection + """ + + def test_value_collection_serialization(self): + """ + Test serialization/deserialization for ValueCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + value_model = {} # Value + value_model['value'] = 'testString' + value_model['metadata'] = {'anyKey': 'anyValue'} + value_model['type'] = 'synonyms' + value_model['synonyms'] = ['testString'] + value_model['patterns'] = ['testString'] + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a ValueCollection model + value_collection_model_json = {} + value_collection_model_json['values'] = [value_model] + value_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of ValueCollection by calling from_dict on the json representation + value_collection_model = ValueCollection.from_dict(value_collection_model_json) + assert value_collection_model != False + + # Construct a model instance of ValueCollection by calling from_dict on the json representation + value_collection_model_dict = ValueCollection.from_dict(value_collection_model_json).__dict__ + value_collection_model2 = ValueCollection(**value_collection_model_dict) + + # Verify the model instances are equivalent + assert value_collection_model == value_collection_model2 + + # Convert model instance back to dict and verify no loss of data + value_collection_model_json2 = value_collection_model.to_dict() + assert value_collection_model_json2 == value_collection_model_json + + +class TestModel_Webhook: + """ + Test Class for Webhook + """ + + def test_webhook_serialization(self): + """ + Test serialization/deserialization for Webhook + """ + + # Construct dict forms of any model objects needed in order to build this model. 
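The Webhook and WebhookHeader payloads assembled below mirror what a caller would pass to the service when defining a workspace. A hedged usage sketch follows; the API key, service URL, version date, webhook details, and the create_workspace arguments are illustrative assumptions, not values taken from this test.

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import AssistantV1
from ibm_watson.assistant_v1 import Webhook, WebhookHeader

authenticator = IAMAuthenticator('{apikey}')                      # placeholder credentials
assistant = AssistantV1(version='2021-06-14', authenticator=authenticator)
assistant.set_service_url('{service_url}')                        # placeholder URL

webhook = Webhook(
    url='https://example.com/assistant-webhook',                  # placeholder endpoint
    name='main_webhook',
    headers=[WebhookHeader(name='X-Auth-Token', value='secret')],
)

workspace = assistant.create_workspace(
    name='demo_workspace',
    webhooks=[webhook],
).get_result()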
+ + webhook_header_model = {} # WebhookHeader + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + # Construct a json representation of a Webhook model + webhook_model_json = {} + webhook_model_json['url'] = 'testString' + webhook_model_json['name'] = 'testString' + webhook_model_json['headers'] = [webhook_header_model] + + # Construct a model instance of Webhook by calling from_dict on the json representation + webhook_model = Webhook.from_dict(webhook_model_json) + assert webhook_model != False + + # Construct a model instance of Webhook by calling from_dict on the json representation + webhook_model_dict = Webhook.from_dict(webhook_model_json).__dict__ + webhook_model2 = Webhook(**webhook_model_dict) + + # Verify the model instances are equivalent + assert webhook_model == webhook_model2 + + # Convert model instance back to dict and verify no loss of data + webhook_model_json2 = webhook_model.to_dict() + assert webhook_model_json2 == webhook_model_json + + +class TestModel_WebhookHeader: + """ + Test Class for WebhookHeader + """ + + def test_webhook_header_serialization(self): + """ + Test serialization/deserialization for WebhookHeader + """ + + # Construct a json representation of a WebhookHeader model + webhook_header_model_json = {} + webhook_header_model_json['name'] = 'testString' + webhook_header_model_json['value'] = 'testString' + + # Construct a model instance of WebhookHeader by calling from_dict on the json representation + webhook_header_model = WebhookHeader.from_dict(webhook_header_model_json) + assert webhook_header_model != False + + # Construct a model instance of WebhookHeader by calling from_dict on the json representation + webhook_header_model_dict = WebhookHeader.from_dict(webhook_header_model_json).__dict__ + webhook_header_model2 = WebhookHeader(**webhook_header_model_dict) + + # Verify the model instances are equivalent + assert webhook_header_model == webhook_header_model2 + + # Convert model instance back to dict and verify no loss of data + webhook_header_model_json2 = webhook_header_model.to_dict() + assert webhook_header_model_json2 == webhook_header_model_json + + +class TestModel_Workspace: + """ + Test Class for Workspace + """ + + def test_workspace_serialization(self): + """ + Test serialization/deserialization for Workspace + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + dialog_node_output_text_values_element_model = {} # DialogNodeOutputTextValuesElement + dialog_node_output_text_values_element_model['text'] = 'testString' + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + dialog_node_output_generic_model = {} # DialogNodeOutputGenericDialogNodeOutputResponseTypeText + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + dialog_node_output_modifiers_model = {} # DialogNodeOutputModifiers + dialog_node_output_modifiers_model['overwrite'] = True + + dialog_node_output_model = {} # DialogNodeOutput + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + dialog_node_context_model = {} # DialogNodeContext + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + dialog_node_next_step_model = {} # DialogNodeNextStep + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_model = {} # DialogNode + dialog_node_model['dialog_node'] = 'testString' + dialog_node_model['description'] = 'testString' + dialog_node_model['conditions'] = 'testString' + dialog_node_model['parent'] = 'testString' + dialog_node_model['previous_sibling'] = 'testString' + dialog_node_model['output'] = dialog_node_output_model + dialog_node_model['context'] = dialog_node_context_model + dialog_node_model['metadata'] = {'anyKey': 'anyValue'} + dialog_node_model['next_step'] = dialog_node_next_step_model + dialog_node_model['title'] = 'testString' + dialog_node_model['type'] = 'standard' + dialog_node_model['event_name'] = 'focus' + dialog_node_model['variable'] = 'testString' + dialog_node_model['actions'] = [dialog_node_action_model] + dialog_node_model['digress_in'] = 'not_available' + dialog_node_model['digress_out'] = 'allow_returning' + dialog_node_model['digress_out_slots'] = 'not_allowed' + dialog_node_model['user_label'] = 'testString' + dialog_node_model['disambiguation_opt_out'] = False + + counterexample_model = {} # Counterexample + counterexample_model['text'] = 'testString' + + workspace_system_settings_tooling_model = {} # WorkspaceSystemSettingsTooling + workspace_system_settings_tooling_model['store_generic_responses'] = True + + workspace_system_settings_disambiguation_model = {} # WorkspaceSystemSettingsDisambiguation + workspace_system_settings_disambiguation_model['prompt'] = 'testString' + workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString' + workspace_system_settings_disambiguation_model['enabled'] 
= False + workspace_system_settings_disambiguation_model['sensitivity'] = 'auto' + workspace_system_settings_disambiguation_model['randomize'] = True + workspace_system_settings_disambiguation_model['max_suggestions'] = 1 + workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString' + + workspace_system_settings_system_entities_model = {} # WorkspaceSystemSettingsSystemEntities + workspace_system_settings_system_entities_model['enabled'] = False + + workspace_system_settings_off_topic_model = {} # WorkspaceSystemSettingsOffTopic + workspace_system_settings_off_topic_model['enabled'] = False + + workspace_system_settings_nlp_model = {} # WorkspaceSystemSettingsNlp + workspace_system_settings_nlp_model['model'] = 'testString' + + workspace_system_settings_model = {} # WorkspaceSystemSettings + workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model + workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model + workspace_system_settings_model['human_agent_assist'] = {'anyKey': 'anyValue'} + workspace_system_settings_model['spelling_suggestions'] = False + workspace_system_settings_model['spelling_auto_correct'] = False + workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model + workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model + workspace_system_settings_model['nlp'] = workspace_system_settings_nlp_model + workspace_system_settings_model['foo'] = 'testString' + + webhook_header_model = {} # WebhookHeader + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + webhook_model = {} # Webhook + webhook_model['url'] = 'testString' + webhook_model['name'] = 'testString' + webhook_model['headers'] = [webhook_header_model] + + mention_model = {} # Mention + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + example_model = {} # Example + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + intent_model = {} # Intent + intent_model['intent'] = 'testString' + intent_model['description'] = 'testString' + intent_model['examples'] = [example_model] + + value_model = {} # Value + value_model['value'] = 'testString' + value_model['metadata'] = {'anyKey': 'anyValue'} + value_model['type'] = 'synonyms' + value_model['synonyms'] = ['testString'] + value_model['patterns'] = ['testString'] + + entity_model = {} # Entity + entity_model['entity'] = 'testString' + entity_model['description'] = 'testString' + entity_model['metadata'] = {'anyKey': 'anyValue'} + entity_model['fuzzy_match'] = True + entity_model['values'] = [value_model] + + # Construct a json representation of a Workspace model + workspace_model_json = {} + workspace_model_json['name'] = 'testString' + workspace_model_json['description'] = 'testString' + workspace_model_json['language'] = 'testString' + workspace_model_json['dialog_nodes'] = [dialog_node_model] + workspace_model_json['counterexamples'] = [counterexample_model] + workspace_model_json['metadata'] = {'anyKey': 'anyValue'} + workspace_model_json['learning_opt_out'] = False + workspace_model_json['system_settings'] = workspace_system_settings_model + workspace_model_json['webhooks'] = [webhook_model] + workspace_model_json['intents'] = [intent_model] + workspace_model_json['entities'] = [entity_model] + + # Construct a model instance of Workspace by calling from_dict on the json representation + workspace_model = 
Workspace.from_dict(workspace_model_json) + assert workspace_model != False + + # Construct a model instance of Workspace by calling from_dict on the json representation + workspace_model_dict = Workspace.from_dict(workspace_model_json).__dict__ + workspace_model2 = Workspace(**workspace_model_dict) + + # Verify the model instances are equivalent + assert workspace_model == workspace_model2 + + # Convert model instance back to dict and verify no loss of data + workspace_model_json2 = workspace_model.to_dict() + assert workspace_model_json2 == workspace_model_json + + +class TestModel_WorkspaceCollection: + """ + Test Class for WorkspaceCollection + """ + + def test_workspace_collection_serialization(self): + """ + Test serialization/deserialization for WorkspaceCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. + + dialog_node_output_text_values_element_model = {} # DialogNodeOutputTextValuesElement + dialog_node_output_text_values_element_model['text'] = 'testString' + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + dialog_node_output_generic_model = {} # DialogNodeOutputGenericDialogNodeOutputResponseTypeText + dialog_node_output_generic_model['response_type'] = 'text' + dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_model['selection_policy'] = 'sequential' + dialog_node_output_generic_model['delimiter'] = '\\n' + dialog_node_output_generic_model['channels'] = [response_generic_channel_model] + + dialog_node_output_modifiers_model = {} # DialogNodeOutputModifiers + dialog_node_output_modifiers_model['overwrite'] = True + + dialog_node_output_model = {} # DialogNodeOutput + dialog_node_output_model['generic'] = [dialog_node_output_generic_model] + dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model + dialog_node_output_model['foo'] = 'testString' + + dialog_node_context_model = {} # DialogNodeContext + dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}} + dialog_node_context_model['foo'] = 'testString' + + dialog_node_next_step_model = {} # DialogNodeNextStep + dialog_node_next_step_model['behavior'] = 'get_user_input' + dialog_node_next_step_model['dialog_node'] = 'testString' + dialog_node_next_step_model['selector'] = 'condition' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_model = {} # DialogNode + dialog_node_model['dialog_node'] = 'testString' + dialog_node_model['description'] = 'testString' + dialog_node_model['conditions'] = 'testString' + dialog_node_model['parent'] = 'testString' + dialog_node_model['previous_sibling'] = 'testString' + dialog_node_model['output'] = dialog_node_output_model + dialog_node_model['context'] = dialog_node_context_model + dialog_node_model['metadata'] = {'anyKey': 'anyValue'} + dialog_node_model['next_step'] = dialog_node_next_step_model + dialog_node_model['title'] = 'testString' + dialog_node_model['type'] = 'standard' + dialog_node_model['event_name'] = 'focus' + dialog_node_model['variable'] = 'testString' + 
dialog_node_model['actions'] = [dialog_node_action_model] + dialog_node_model['digress_in'] = 'not_available' + dialog_node_model['digress_out'] = 'allow_returning' + dialog_node_model['digress_out_slots'] = 'not_allowed' + dialog_node_model['user_label'] = 'testString' + dialog_node_model['disambiguation_opt_out'] = False + + counterexample_model = {} # Counterexample + counterexample_model['text'] = 'testString' + + workspace_system_settings_tooling_model = {} # WorkspaceSystemSettingsTooling + workspace_system_settings_tooling_model['store_generic_responses'] = True + + workspace_system_settings_disambiguation_model = {} # WorkspaceSystemSettingsDisambiguation + workspace_system_settings_disambiguation_model['prompt'] = 'testString' + workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString' + workspace_system_settings_disambiguation_model['enabled'] = False + workspace_system_settings_disambiguation_model['sensitivity'] = 'auto' + workspace_system_settings_disambiguation_model['randomize'] = True + workspace_system_settings_disambiguation_model['max_suggestions'] = 1 + workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString' + + workspace_system_settings_system_entities_model = {} # WorkspaceSystemSettingsSystemEntities + workspace_system_settings_system_entities_model['enabled'] = False + + workspace_system_settings_off_topic_model = {} # WorkspaceSystemSettingsOffTopic + workspace_system_settings_off_topic_model['enabled'] = False + + workspace_system_settings_nlp_model = {} # WorkspaceSystemSettingsNlp + workspace_system_settings_nlp_model['model'] = 'testString' + + workspace_system_settings_model = {} # WorkspaceSystemSettings + workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model + workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model + workspace_system_settings_model['human_agent_assist'] = {'anyKey': 'anyValue'} + workspace_system_settings_model['spelling_suggestions'] = False + workspace_system_settings_model['spelling_auto_correct'] = False + workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model + workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model + workspace_system_settings_model['nlp'] = workspace_system_settings_nlp_model + workspace_system_settings_model['foo'] = 'testString' + + webhook_header_model = {} # WebhookHeader + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + webhook_model = {} # Webhook + webhook_model['url'] = 'testString' + webhook_model['name'] = 'testString' + webhook_model['headers'] = [webhook_header_model] + + mention_model = {} # Mention + mention_model['entity'] = 'testString' + mention_model['location'] = [38] + + example_model = {} # Example + example_model['text'] = 'testString' + example_model['mentions'] = [mention_model] + + intent_model = {} # Intent + intent_model['intent'] = 'testString' + intent_model['description'] = 'testString' + intent_model['examples'] = [example_model] + + value_model = {} # Value + value_model['value'] = 'testString' + value_model['metadata'] = {'anyKey': 'anyValue'} + value_model['type'] = 'synonyms' + value_model['synonyms'] = ['testString'] + value_model['patterns'] = ['testString'] + + entity_model = {} # Entity + entity_model['entity'] = 'testString' + entity_model['description'] = 'testString' + entity_model['metadata'] = {'anyKey': 'anyValue'} 
+ entity_model['fuzzy_match'] = True + entity_model['values'] = [value_model] + + workspace_model = {} # Workspace + workspace_model['name'] = 'testString' + workspace_model['description'] = 'testString' + workspace_model['language'] = 'testString' + workspace_model['dialog_nodes'] = [dialog_node_model] + workspace_model['counterexamples'] = [counterexample_model] + workspace_model['metadata'] = {'anyKey': 'anyValue'} + workspace_model['learning_opt_out'] = False + workspace_model['system_settings'] = workspace_system_settings_model + workspace_model['webhooks'] = [webhook_model] + workspace_model['intents'] = [intent_model] + workspace_model['entities'] = [entity_model] + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a WorkspaceCollection model + workspace_collection_model_json = {} + workspace_collection_model_json['workspaces'] = [workspace_model] + workspace_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of WorkspaceCollection by calling from_dict on the json representation + workspace_collection_model = WorkspaceCollection.from_dict(workspace_collection_model_json) + assert workspace_collection_model != False + + # Construct a model instance of WorkspaceCollection by calling from_dict on the json representation + workspace_collection_model_dict = WorkspaceCollection.from_dict(workspace_collection_model_json).__dict__ + workspace_collection_model2 = WorkspaceCollection(**workspace_collection_model_dict) + + # Verify the model instances are equivalent + assert workspace_collection_model == workspace_collection_model2 + + # Convert model instance back to dict and verify no loss of data + workspace_collection_model_json2 = workspace_collection_model.to_dict() + assert workspace_collection_model_json2 == workspace_collection_model_json + + +class TestModel_WorkspaceCounts: + """ + Test Class for WorkspaceCounts + """ + + def test_workspace_counts_serialization(self): + """ + Test serialization/deserialization for WorkspaceCounts + """ + + # Construct a json representation of a WorkspaceCounts model + workspace_counts_model_json = {} + workspace_counts_model_json['intent'] = 38 + workspace_counts_model_json['entity'] = 38 + workspace_counts_model_json['node'] = 38 + + # Construct a model instance of WorkspaceCounts by calling from_dict on the json representation + workspace_counts_model = WorkspaceCounts.from_dict(workspace_counts_model_json) + assert workspace_counts_model != False + + # Construct a model instance of WorkspaceCounts by calling from_dict on the json representation + workspace_counts_model_dict = WorkspaceCounts.from_dict(workspace_counts_model_json).__dict__ + workspace_counts_model2 = WorkspaceCounts(**workspace_counts_model_dict) + + # Verify the model instances are equivalent + assert workspace_counts_model == workspace_counts_model2 + + # Convert model instance back to dict and verify no loss of data + workspace_counts_model_json2 = workspace_counts_model.to_dict() + assert workspace_counts_model_json2 == workspace_counts_model_json + + +class TestModel_WorkspaceSystemSettings: + """ + Test Class for WorkspaceSystemSettings + """ + + def test_workspace_system_settings_serialization(self): + """ + Test serialization/deserialization for 
WorkspaceSystemSettings + """ + + # Construct dict forms of any model objects needed in order to build this model. + + workspace_system_settings_tooling_model = {} # WorkspaceSystemSettingsTooling + workspace_system_settings_tooling_model['store_generic_responses'] = True + + workspace_system_settings_disambiguation_model = {} # WorkspaceSystemSettingsDisambiguation + workspace_system_settings_disambiguation_model['prompt'] = 'testString' + workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString' + workspace_system_settings_disambiguation_model['enabled'] = False + workspace_system_settings_disambiguation_model['sensitivity'] = 'auto' + workspace_system_settings_disambiguation_model['randomize'] = True + workspace_system_settings_disambiguation_model['max_suggestions'] = 1 + workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString' + + workspace_system_settings_system_entities_model = {} # WorkspaceSystemSettingsSystemEntities + workspace_system_settings_system_entities_model['enabled'] = False + + workspace_system_settings_off_topic_model = {} # WorkspaceSystemSettingsOffTopic + workspace_system_settings_off_topic_model['enabled'] = False + + workspace_system_settings_nlp_model = {} # WorkspaceSystemSettingsNlp + workspace_system_settings_nlp_model['model'] = 'testString' + + # Construct a json representation of a WorkspaceSystemSettings model + workspace_system_settings_model_json = {} + workspace_system_settings_model_json['tooling'] = workspace_system_settings_tooling_model + workspace_system_settings_model_json['disambiguation'] = workspace_system_settings_disambiguation_model + workspace_system_settings_model_json['human_agent_assist'] = {'anyKey': 'anyValue'} + workspace_system_settings_model_json['spelling_suggestions'] = False + workspace_system_settings_model_json['spelling_auto_correct'] = False + workspace_system_settings_model_json['system_entities'] = workspace_system_settings_system_entities_model + workspace_system_settings_model_json['off_topic'] = workspace_system_settings_off_topic_model + workspace_system_settings_model_json['nlp'] = workspace_system_settings_nlp_model + workspace_system_settings_model_json['foo'] = 'testString' + + # Construct a model instance of WorkspaceSystemSettings by calling from_dict on the json representation + workspace_system_settings_model = WorkspaceSystemSettings.from_dict(workspace_system_settings_model_json) + assert workspace_system_settings_model != False + + # Construct a model instance of WorkspaceSystemSettings by calling from_dict on the json representation + workspace_system_settings_model_dict = WorkspaceSystemSettings.from_dict(workspace_system_settings_model_json).__dict__ + workspace_system_settings_model2 = WorkspaceSystemSettings(**workspace_system_settings_model_dict) + + # Verify the model instances are equivalent + assert workspace_system_settings_model == workspace_system_settings_model2 + + # Convert model instance back to dict and verify no loss of data + workspace_system_settings_model_json2 = workspace_system_settings_model.to_dict() + assert workspace_system_settings_model_json2 == workspace_system_settings_model_json + + # Test get_properties and set_properties methods. 
+ workspace_system_settings_model.set_properties({}) + actual_dict = workspace_system_settings_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + workspace_system_settings_model.set_properties(expected_dict) + actual_dict = workspace_system_settings_model.get_properties() + assert actual_dict.keys() == expected_dict.keys() + + +class TestModel_WorkspaceSystemSettingsDisambiguation: + """ + Test Class for WorkspaceSystemSettingsDisambiguation + """ + + def test_workspace_system_settings_disambiguation_serialization(self): + """ + Test serialization/deserialization for WorkspaceSystemSettingsDisambiguation + """ + + # Construct a json representation of a WorkspaceSystemSettingsDisambiguation model + workspace_system_settings_disambiguation_model_json = {} + workspace_system_settings_disambiguation_model_json['prompt'] = 'testString' + workspace_system_settings_disambiguation_model_json['none_of_the_above_prompt'] = 'testString' + workspace_system_settings_disambiguation_model_json['enabled'] = False + workspace_system_settings_disambiguation_model_json['sensitivity'] = 'auto' + workspace_system_settings_disambiguation_model_json['randomize'] = True + workspace_system_settings_disambiguation_model_json['max_suggestions'] = 1 + workspace_system_settings_disambiguation_model_json['suggestion_text_policy'] = 'testString' + + # Construct a model instance of WorkspaceSystemSettingsDisambiguation by calling from_dict on the json representation + workspace_system_settings_disambiguation_model = WorkspaceSystemSettingsDisambiguation.from_dict(workspace_system_settings_disambiguation_model_json) + assert workspace_system_settings_disambiguation_model != False + + # Construct a model instance of WorkspaceSystemSettingsDisambiguation by calling from_dict on the json representation + workspace_system_settings_disambiguation_model_dict = WorkspaceSystemSettingsDisambiguation.from_dict(workspace_system_settings_disambiguation_model_json).__dict__ + workspace_system_settings_disambiguation_model2 = WorkspaceSystemSettingsDisambiguation(**workspace_system_settings_disambiguation_model_dict) + + # Verify the model instances are equivalent + assert workspace_system_settings_disambiguation_model == workspace_system_settings_disambiguation_model2 + + # Convert model instance back to dict and verify no loss of data + workspace_system_settings_disambiguation_model_json2 = workspace_system_settings_disambiguation_model.to_dict() + assert workspace_system_settings_disambiguation_model_json2 == workspace_system_settings_disambiguation_model_json + + +class TestModel_WorkspaceSystemSettingsNlp: + """ + Test Class for WorkspaceSystemSettingsNlp + """ + + def test_workspace_system_settings_nlp_serialization(self): + """ + Test serialization/deserialization for WorkspaceSystemSettingsNlp + """ + + # Construct a json representation of a WorkspaceSystemSettingsNlp model + workspace_system_settings_nlp_model_json = {} + workspace_system_settings_nlp_model_json['model'] = 'testString' + + # Construct a model instance of WorkspaceSystemSettingsNlp by calling from_dict on the json representation + workspace_system_settings_nlp_model = WorkspaceSystemSettingsNlp.from_dict(workspace_system_settings_nlp_model_json) + assert workspace_system_settings_nlp_model != False + + # Construct a model instance of WorkspaceSystemSettingsNlp by calling from_dict on the json representation + workspace_system_settings_nlp_model_dict = 
WorkspaceSystemSettingsNlp.from_dict(workspace_system_settings_nlp_model_json).__dict__ + workspace_system_settings_nlp_model2 = WorkspaceSystemSettingsNlp(**workspace_system_settings_nlp_model_dict) + + # Verify the model instances are equivalent + assert workspace_system_settings_nlp_model == workspace_system_settings_nlp_model2 + + # Convert model instance back to dict and verify no loss of data + workspace_system_settings_nlp_model_json2 = workspace_system_settings_nlp_model.to_dict() + assert workspace_system_settings_nlp_model_json2 == workspace_system_settings_nlp_model_json + + +class TestModel_WorkspaceSystemSettingsOffTopic: + """ + Test Class for WorkspaceSystemSettingsOffTopic + """ + + def test_workspace_system_settings_off_topic_serialization(self): + """ + Test serialization/deserialization for WorkspaceSystemSettingsOffTopic + """ + + # Construct a json representation of a WorkspaceSystemSettingsOffTopic model + workspace_system_settings_off_topic_model_json = {} + workspace_system_settings_off_topic_model_json['enabled'] = False + + # Construct a model instance of WorkspaceSystemSettingsOffTopic by calling from_dict on the json representation + workspace_system_settings_off_topic_model = WorkspaceSystemSettingsOffTopic.from_dict(workspace_system_settings_off_topic_model_json) + assert workspace_system_settings_off_topic_model != False + + # Construct a model instance of WorkspaceSystemSettingsOffTopic by calling from_dict on the json representation + workspace_system_settings_off_topic_model_dict = WorkspaceSystemSettingsOffTopic.from_dict(workspace_system_settings_off_topic_model_json).__dict__ + workspace_system_settings_off_topic_model2 = WorkspaceSystemSettingsOffTopic(**workspace_system_settings_off_topic_model_dict) + + # Verify the model instances are equivalent + assert workspace_system_settings_off_topic_model == workspace_system_settings_off_topic_model2 + + # Convert model instance back to dict and verify no loss of data + workspace_system_settings_off_topic_model_json2 = workspace_system_settings_off_topic_model.to_dict() + assert workspace_system_settings_off_topic_model_json2 == workspace_system_settings_off_topic_model_json + + +class TestModel_WorkspaceSystemSettingsSystemEntities: + """ + Test Class for WorkspaceSystemSettingsSystemEntities + """ + + def test_workspace_system_settings_system_entities_serialization(self): + """ + Test serialization/deserialization for WorkspaceSystemSettingsSystemEntities + """ + + # Construct a json representation of a WorkspaceSystemSettingsSystemEntities model + workspace_system_settings_system_entities_model_json = {} + workspace_system_settings_system_entities_model_json['enabled'] = False + + # Construct a model instance of WorkspaceSystemSettingsSystemEntities by calling from_dict on the json representation + workspace_system_settings_system_entities_model = WorkspaceSystemSettingsSystemEntities.from_dict(workspace_system_settings_system_entities_model_json) + assert workspace_system_settings_system_entities_model != False + + # Construct a model instance of WorkspaceSystemSettingsSystemEntities by calling from_dict on the json representation + workspace_system_settings_system_entities_model_dict = WorkspaceSystemSettingsSystemEntities.from_dict(workspace_system_settings_system_entities_model_json).__dict__ + workspace_system_settings_system_entities_model2 = WorkspaceSystemSettingsSystemEntities(**workspace_system_settings_system_entities_model_dict) + + # Verify the model instances are equivalent + assert 
workspace_system_settings_system_entities_model == workspace_system_settings_system_entities_model2 + + # Convert model instance back to dict and verify no loss of data + workspace_system_settings_system_entities_model_json2 = workspace_system_settings_system_entities_model.to_dict() + assert workspace_system_settings_system_entities_model_json2 == workspace_system_settings_system_entities_model_json + + +class TestModel_WorkspaceSystemSettingsTooling: + """ + Test Class for WorkspaceSystemSettingsTooling + """ + + def test_workspace_system_settings_tooling_serialization(self): + """ + Test serialization/deserialization for WorkspaceSystemSettingsTooling + """ + + # Construct a json representation of a WorkspaceSystemSettingsTooling model + workspace_system_settings_tooling_model_json = {} + workspace_system_settings_tooling_model_json['store_generic_responses'] = True + + # Construct a model instance of WorkspaceSystemSettingsTooling by calling from_dict on the json representation + workspace_system_settings_tooling_model = WorkspaceSystemSettingsTooling.from_dict(workspace_system_settings_tooling_model_json) + assert workspace_system_settings_tooling_model != False + + # Construct a model instance of WorkspaceSystemSettingsTooling by calling from_dict on the json representation + workspace_system_settings_tooling_model_dict = WorkspaceSystemSettingsTooling.from_dict(workspace_system_settings_tooling_model_json).__dict__ + workspace_system_settings_tooling_model2 = WorkspaceSystemSettingsTooling(**workspace_system_settings_tooling_model_dict) + + # Verify the model instances are equivalent + assert workspace_system_settings_tooling_model == workspace_system_settings_tooling_model2 + + # Convert model instance back to dict and verify no loss of data + workspace_system_settings_tooling_model_json2 = workspace_system_settings_tooling_model.to_dict() + assert workspace_system_settings_tooling_model_json2 == workspace_system_settings_tooling_model_json + + +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio: + """ + Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio + """ + + def test_dialog_node_output_generic_dialog_node_output_response_type_audio_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio + """ + + # Construct dict forms of any model objects needed in order to build this model. 
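The WorkspaceSystemSettings tests above also illustrate the round-trip contract that every model test in this file repeats: a plain dict goes in through from_dict, an equivalent instance can be rebuilt from the first instance's attributes, and to_dict returns the original dict unchanged; models that accept additional properties (the 'foo' key above) expose them through set_properties/get_properties. A condensed, hedged sketch of both patterns, assuming these classes are importable from ibm_watson.assistant_v1 as in the rest of this module:

from ibm_watson.assistant_v1 import (
    WorkspaceSystemSettings,
    WorkspaceSystemSettingsTooling,
)

# Round trip: dict -> model -> dict should be lossless
tooling_json = {'store_generic_responses': True}
tooling = WorkspaceSystemSettingsTooling.from_dict(tooling_json)
assert tooling.to_dict() == tooling_json

# Dynamic properties: keys not declared on the model are kept and retrievable
settings = WorkspaceSystemSettings.from_dict({})
settings.set_properties({'foo': 'testString'})
assert 'foo' in settings.get_properties()

The generated tests repeat this shape verbatim for each model, which is why the assertions below differ only in the class and field names involved.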
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio model + dialog_node_output_generic_dialog_node_output_response_type_audio_model_json = {} + dialog_node_output_generic_dialog_node_output_response_type_audio_model_json['response_type'] = 'audio' + dialog_node_output_generic_dialog_node_output_response_type_audio_model_json['source'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_audio_model_json['title'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_audio_model_json['description'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_audio_model_json['channels'] = [response_generic_channel_model] + dialog_node_output_generic_dialog_node_output_response_type_audio_model_json['channel_options'] = {'anyKey': 'anyValue'} + dialog_node_output_generic_dialog_node_output_response_type_audio_model_json['alt_text'] = 'testString' + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_audio_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio.from_dict(dialog_node_output_generic_dialog_node_output_response_type_audio_model_json) + assert dialog_node_output_generic_dialog_node_output_response_type_audio_model != False + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_audio_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio.from_dict(dialog_node_output_generic_dialog_node_output_response_type_audio_model_json).__dict__ + dialog_node_output_generic_dialog_node_output_response_type_audio_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio(**dialog_node_output_generic_dialog_node_output_response_type_audio_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_generic_dialog_node_output_response_type_audio_model == dialog_node_output_generic_dialog_node_output_response_type_audio_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_generic_dialog_node_output_response_type_audio_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_audio_model.to_dict() + assert dialog_node_output_generic_dialog_node_output_response_type_audio_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_audio_model_json + + +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer: + """ + Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer + """ + + def test_dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + channel_transfer_target_chat_model = {} # ChannelTransferTargetChat + channel_transfer_target_chat_model['url'] = 'testString' + + channel_transfer_target_model = {} # ChannelTransferTarget + channel_transfer_target_model['chat'] = channel_transfer_target_chat_model + + channel_transfer_info_model = {} # ChannelTransferInfo + channel_transfer_info_model['target'] = channel_transfer_target_model + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer model + dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json = {} + dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json['response_type'] = 'channel_transfer' + dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json['message_to_user'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json['transfer_info'] = channel_transfer_info_model + dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer.from_dict(dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json) + assert dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model != False + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer.from_dict(dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json).__dict__ + dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer(**dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model == dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model.to_dict() + assert dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json + + +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent: + """ + Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent + """ + + def test_dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent + """ + + # Construct dict forms of any model 
objects needed in order to build this model. + + agent_availability_message_model = {} # AgentAvailabilityMessage + agent_availability_message_model['message'] = 'testString' + + dialog_node_output_connect_to_agent_transfer_info_model = {} # DialogNodeOutputConnectToAgentTransferInfo + dialog_node_output_connect_to_agent_transfer_info_model['target'] = {'key1': {'anyKey': 'anyValue'}} + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent model + dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json = {} + dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json['response_type'] = 'connect_to_agent' + dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json['message_to_human_agent'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json['agent_available'] = agent_availability_message_model + dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json['agent_unavailable'] = agent_availability_message_model + dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json['transfer_info'] = dialog_node_output_connect_to_agent_transfer_info_model + dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent.from_dict(dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json) + assert dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model != False + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent.from_dict(dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json).__dict__ + dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent(**dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model == dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model.to_dict() + assert dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json + + +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe: + """ + Test Class for 
DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe + """ + + def test_dialog_node_output_generic_dialog_node_output_response_type_iframe_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe + """ + + # Construct dict forms of any model objects needed in order to build this model. + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe model + dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json = {} + dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json['response_type'] = 'iframe' + dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json['source'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json['title'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json['description'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json['image_url'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_iframe_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe.from_dict(dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json) + assert dialog_node_output_generic_dialog_node_output_response_type_iframe_model != False + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_iframe_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe.from_dict(dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json).__dict__ + dialog_node_output_generic_dialog_node_output_response_type_iframe_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe(**dialog_node_output_generic_dialog_node_output_response_type_iframe_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_generic_dialog_node_output_response_type_iframe_model == dialog_node_output_generic_dialog_node_output_response_type_iframe_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_iframe_model.to_dict() + assert dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_iframe_model_json + + +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeImage: + """ + Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeImage + """ + + def test_dialog_node_output_generic_dialog_node_output_response_type_image_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeImage + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeImage model + dialog_node_output_generic_dialog_node_output_response_type_image_model_json = {} + dialog_node_output_generic_dialog_node_output_response_type_image_model_json['response_type'] = 'image' + dialog_node_output_generic_dialog_node_output_response_type_image_model_json['source'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_image_model_json['title'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_image_model_json['description'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_image_model_json['channels'] = [response_generic_channel_model] + dialog_node_output_generic_dialog_node_output_response_type_image_model_json['alt_text'] = 'testString' + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeImage by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_image_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeImage.from_dict(dialog_node_output_generic_dialog_node_output_response_type_image_model_json) + assert dialog_node_output_generic_dialog_node_output_response_type_image_model != False + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeImage by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_image_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeImage.from_dict(dialog_node_output_generic_dialog_node_output_response_type_image_model_json).__dict__ + dialog_node_output_generic_dialog_node_output_response_type_image_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeImage(**dialog_node_output_generic_dialog_node_output_response_type_image_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_generic_dialog_node_output_response_type_image_model == dialog_node_output_generic_dialog_node_output_response_type_image_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_generic_dialog_node_output_response_type_image_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_image_model.to_dict() + assert dialog_node_output_generic_dialog_node_output_response_type_image_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_image_model_json + + +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeOption: + """ + Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeOption + """ + + def test_dialog_node_output_generic_dialog_node_output_response_type_option_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeOption + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_input_model = {} # MessageInput + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + dialog_node_output_options_element_value_model = {} # DialogNodeOutputOptionsElementValue + dialog_node_output_options_element_value_model['input'] = message_input_model + dialog_node_output_options_element_value_model['intents'] = [runtime_intent_model] + dialog_node_output_options_element_value_model['entities'] = [runtime_entity_model] + + dialog_node_output_options_element_model = {} # DialogNodeOutputOptionsElement + dialog_node_output_options_element_model['label'] = 'testString' + 
dialog_node_output_options_element_model['value'] = dialog_node_output_options_element_value_model + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeOption model + dialog_node_output_generic_dialog_node_output_response_type_option_model_json = {} + dialog_node_output_generic_dialog_node_output_response_type_option_model_json['response_type'] = 'option' + dialog_node_output_generic_dialog_node_output_response_type_option_model_json['title'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_option_model_json['description'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_option_model_json['preference'] = 'dropdown' + dialog_node_output_generic_dialog_node_output_response_type_option_model_json['options'] = [dialog_node_output_options_element_model] + dialog_node_output_generic_dialog_node_output_response_type_option_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeOption by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_option_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeOption.from_dict(dialog_node_output_generic_dialog_node_output_response_type_option_model_json) + assert dialog_node_output_generic_dialog_node_output_response_type_option_model != False + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeOption by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_option_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeOption.from_dict(dialog_node_output_generic_dialog_node_output_response_type_option_model_json).__dict__ + dialog_node_output_generic_dialog_node_output_response_type_option_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeOption(**dialog_node_output_generic_dialog_node_output_response_type_option_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_generic_dialog_node_output_response_type_option_model == dialog_node_output_generic_dialog_node_output_response_type_option_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_generic_dialog_node_output_response_type_option_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_option_model.to_dict() + assert dialog_node_output_generic_dialog_node_output_response_type_option_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_option_model_json + + +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypePause: + """ + Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypePause + """ + + def test_dialog_node_output_generic_dialog_node_output_response_type_pause_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypePause + """ + + # Construct dict forms of any model objects needed in order to build this model. 
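The option test above carries the deepest nesting in this file: a MessageInput sits inside a DialogNodeOutputOptionsElementValue, which sits inside a DialogNodeOutputOptionsElement, which sits inside the option response itself. A minimal sketch of that shape, trimmed to the fields needed to see the nesting and assuming the class is importable from ibm_watson.assistant_v1 (field names are taken from the test payload above; real workspaces may enforce stricter required-field rules):

from ibm_watson.assistant_v1 import DialogNodeOutputGenericDialogNodeOutputResponseTypeOption

# One selectable option whose value is just a text input
option_json = {
    'response_type': 'option',
    'title': 'Pick one',
    'preference': 'dropdown',
    'options': [
        {
            'label': 'Say hello',
            'value': {'input': {'text': 'hello'}},
        }
    ],
}

option_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeOption.from_dict(option_json)
# Nested dicts become model instances on the way in and dicts again on the way out
assert option_model.to_dict() == option_json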
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypePause model + dialog_node_output_generic_dialog_node_output_response_type_pause_model_json = {} + dialog_node_output_generic_dialog_node_output_response_type_pause_model_json['response_type'] = 'pause' + dialog_node_output_generic_dialog_node_output_response_type_pause_model_json['time'] = 38 + dialog_node_output_generic_dialog_node_output_response_type_pause_model_json['typing'] = True + dialog_node_output_generic_dialog_node_output_response_type_pause_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypePause by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_pause_model = DialogNodeOutputGenericDialogNodeOutputResponseTypePause.from_dict(dialog_node_output_generic_dialog_node_output_response_type_pause_model_json) + assert dialog_node_output_generic_dialog_node_output_response_type_pause_model != False + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypePause by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_pause_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypePause.from_dict(dialog_node_output_generic_dialog_node_output_response_type_pause_model_json).__dict__ + dialog_node_output_generic_dialog_node_output_response_type_pause_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypePause(**dialog_node_output_generic_dialog_node_output_response_type_pause_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_generic_dialog_node_output_response_type_pause_model == dialog_node_output_generic_dialog_node_output_response_type_pause_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_generic_dialog_node_output_response_type_pause_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_pause_model.to_dict() + assert dialog_node_output_generic_dialog_node_output_response_type_pause_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_pause_model_json + + +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill: + """ + Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill + """ + + def test_dialog_node_output_generic_dialog_node_output_response_type_search_skill_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill model + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json = {} + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['response_type'] = 'search_skill' + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['query'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['query_type'] = 'natural_language' + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['filter'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['discovery_version'] = '2018-12-03' + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill.from_dict(dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json) + assert dialog_node_output_generic_dialog_node_output_response_type_search_skill_model != False + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill.from_dict(dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json).__dict__ + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill(**dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_generic_dialog_node_output_response_type_search_skill_model == dialog_node_output_generic_dialog_node_output_response_type_search_skill_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_search_skill_model.to_dict() + assert dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json + + +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeText: + """ + Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeText + """ + + def test_dialog_node_output_generic_dialog_node_output_response_type_text_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeText + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + dialog_node_output_text_values_element_model = {} # DialogNodeOutputTextValuesElement + dialog_node_output_text_values_element_model['text'] = 'testString' + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model + dialog_node_output_generic_dialog_node_output_response_type_text_model_json = {} + dialog_node_output_generic_dialog_node_output_response_type_text_model_json['response_type'] = 'text' + dialog_node_output_generic_dialog_node_output_response_type_text_model_json['values'] = [dialog_node_output_text_values_element_model] + dialog_node_output_generic_dialog_node_output_response_type_text_model_json['selection_policy'] = 'sequential' + dialog_node_output_generic_dialog_node_output_response_type_text_model_json['delimiter'] = '\\n' + dialog_node_output_generic_dialog_node_output_response_type_text_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeText by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_text_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeText.from_dict(dialog_node_output_generic_dialog_node_output_response_type_text_model_json) + assert dialog_node_output_generic_dialog_node_output_response_type_text_model != False + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeText by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_text_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeText.from_dict(dialog_node_output_generic_dialog_node_output_response_type_text_model_json).__dict__ + dialog_node_output_generic_dialog_node_output_response_type_text_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeText(**dialog_node_output_generic_dialog_node_output_response_type_text_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_generic_dialog_node_output_response_type_text_model == dialog_node_output_generic_dialog_node_output_response_type_text_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_generic_dialog_node_output_response_type_text_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_text_model.to_dict() + assert dialog_node_output_generic_dialog_node_output_response_type_text_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_text_model_json + + +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined: + """ + Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined + """ + + def test_dialog_node_output_generic_dialog_node_output_response_type_user_defined_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined model + dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json = {} + dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json['response_type'] = 'user_defined' + dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json['user_defined'] = {'anyKey': 'anyValue'} + dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_user_defined_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined.from_dict(dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json) + assert dialog_node_output_generic_dialog_node_output_response_type_user_defined_model != False + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined.from_dict(dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json).__dict__ + dialog_node_output_generic_dialog_node_output_response_type_user_defined_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined(**dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_generic_dialog_node_output_response_type_user_defined_model == dialog_node_output_generic_dialog_node_output_response_type_user_defined_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_user_defined_model.to_dict() + assert dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json + + +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo: + """ + Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo + """ + + def test_dialog_node_output_generic_dialog_node_output_response_type_video_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo model + dialog_node_output_generic_dialog_node_output_response_type_video_model_json = {} + dialog_node_output_generic_dialog_node_output_response_type_video_model_json['response_type'] = 'video' + dialog_node_output_generic_dialog_node_output_response_type_video_model_json['source'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_video_model_json['title'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_video_model_json['description'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_video_model_json['channels'] = [response_generic_channel_model] + dialog_node_output_generic_dialog_node_output_response_type_video_model_json['channel_options'] = {'anyKey': 'anyValue'} + dialog_node_output_generic_dialog_node_output_response_type_video_model_json['alt_text'] = 'testString' + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_video_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo.from_dict(dialog_node_output_generic_dialog_node_output_response_type_video_model_json) + assert dialog_node_output_generic_dialog_node_output_response_type_video_model != False + + # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo by calling from_dict on the json representation + dialog_node_output_generic_dialog_node_output_response_type_video_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo.from_dict(dialog_node_output_generic_dialog_node_output_response_type_video_model_json).__dict__ + dialog_node_output_generic_dialog_node_output_response_type_video_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo(**dialog_node_output_generic_dialog_node_output_response_type_video_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_generic_dialog_node_output_response_type_video_model == dialog_node_output_generic_dialog_node_output_response_type_video_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_generic_dialog_node_output_response_type_video_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_video_model.to_dict() + assert dialog_node_output_generic_dialog_node_output_response_type_video_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_video_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeAudio: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeAudio + """ + + def test_runtime_response_generic_runtime_response_type_audio_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeAudio + """ + + # Construct dict forms of any model objects needed in order to build this model. 
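The DialogNodeOutputGeneric* classes tested above model responses as they are authored on a dialog node; the RuntimeResponseGeneric* classes covered by the remaining tests model the same response types as they come back from the message endpoint. The audio tests on both sides use identical payloads, so, assuming both classes are importable from ibm_watson.assistant_v1, the same dict should round-trip through either family (the URL below is a placeholder, not from the SDK):

from ibm_watson.assistant_v1 import (
    DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio,
    RuntimeResponseGenericRuntimeResponseTypeAudio,
)

audio_json = {
    'response_type': 'audio',
    'source': 'https://example.com/clip.mp3',  # placeholder audio URL
    'title': 'Greeting',
    'alt_text': 'A short greeting clip',
}

authored = DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio.from_dict(audio_json)
returned = RuntimeResponseGenericRuntimeResponseTypeAudio.from_dict(audio_json)
assert authored.to_dict() == returned.to_dict() == audio_json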
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeAudio model + runtime_response_generic_runtime_response_type_audio_model_json = {} + runtime_response_generic_runtime_response_type_audio_model_json['response_type'] = 'audio' + runtime_response_generic_runtime_response_type_audio_model_json['source'] = 'testString' + runtime_response_generic_runtime_response_type_audio_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_audio_model_json['description'] = 'testString' + runtime_response_generic_runtime_response_type_audio_model_json['channels'] = [response_generic_channel_model] + runtime_response_generic_runtime_response_type_audio_model_json['channel_options'] = {'anyKey': 'anyValue'} + runtime_response_generic_runtime_response_type_audio_model_json['alt_text'] = 'testString' + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeAudio by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_audio_model = RuntimeResponseGenericRuntimeResponseTypeAudio.from_dict(runtime_response_generic_runtime_response_type_audio_model_json) + assert runtime_response_generic_runtime_response_type_audio_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeAudio by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_audio_model_dict = RuntimeResponseGenericRuntimeResponseTypeAudio.from_dict(runtime_response_generic_runtime_response_type_audio_model_json).__dict__ + runtime_response_generic_runtime_response_type_audio_model2 = RuntimeResponseGenericRuntimeResponseTypeAudio(**runtime_response_generic_runtime_response_type_audio_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_audio_model == runtime_response_generic_runtime_response_type_audio_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_audio_model_json2 = runtime_response_generic_runtime_response_type_audio_model.to_dict() + assert runtime_response_generic_runtime_response_type_audio_model_json2 == runtime_response_generic_runtime_response_type_audio_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeChannelTransfer: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeChannelTransfer + """ + + def test_runtime_response_generic_runtime_response_type_channel_transfer_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeChannelTransfer + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + channel_transfer_target_chat_model = {} # ChannelTransferTargetChat + channel_transfer_target_chat_model['url'] = 'testString' + + channel_transfer_target_model = {} # ChannelTransferTarget + channel_transfer_target_model['chat'] = channel_transfer_target_chat_model + + channel_transfer_info_model = {} # ChannelTransferInfo + channel_transfer_info_model['target'] = channel_transfer_target_model + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeChannelTransfer model + runtime_response_generic_runtime_response_type_channel_transfer_model_json = {} + runtime_response_generic_runtime_response_type_channel_transfer_model_json['response_type'] = 'channel_transfer' + runtime_response_generic_runtime_response_type_channel_transfer_model_json['message_to_user'] = 'testString' + runtime_response_generic_runtime_response_type_channel_transfer_model_json['transfer_info'] = channel_transfer_info_model + runtime_response_generic_runtime_response_type_channel_transfer_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeChannelTransfer by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_channel_transfer_model = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer.from_dict(runtime_response_generic_runtime_response_type_channel_transfer_model_json) + assert runtime_response_generic_runtime_response_type_channel_transfer_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeChannelTransfer by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_channel_transfer_model_dict = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer.from_dict(runtime_response_generic_runtime_response_type_channel_transfer_model_json).__dict__ + runtime_response_generic_runtime_response_type_channel_transfer_model2 = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer(**runtime_response_generic_runtime_response_type_channel_transfer_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_channel_transfer_model == runtime_response_generic_runtime_response_type_channel_transfer_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_channel_transfer_model_json2 = runtime_response_generic_runtime_response_type_channel_transfer_model.to_dict() + assert runtime_response_generic_runtime_response_type_channel_transfer_model_json2 == runtime_response_generic_runtime_response_type_channel_transfer_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeConnectToAgent: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeConnectToAgent + """ + + def test_runtime_response_generic_runtime_response_type_connect_to_agent_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeConnectToAgent + """ + + # Construct dict forms of any model objects needed in order to build this model. 
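The channel-transfer payload tested above is the one response type here whose transfer_info nests three models deep (ChannelTransferTargetChat inside ChannelTransferTarget inside ChannelTransferInfo). A short sketch of that shape, assuming the class is importable from ibm_watson.assistant_v1 and using a placeholder chat URL:

from ibm_watson.assistant_v1 import RuntimeResponseGenericRuntimeResponseTypeChannelTransfer

transfer_json = {
    'response_type': 'channel_transfer',
    'message_to_user': 'Moving this conversation to web chat.',
    'transfer_info': {
        'target': {
            'chat': {'url': 'https://example.com/webchat'},  # placeholder URL
        },
    },
}

transfer_model = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer.from_dict(transfer_json)
assert transfer_model.to_dict() == transfer_json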
+ + agent_availability_message_model = {} # AgentAvailabilityMessage + agent_availability_message_model['message'] = 'testString' + + dialog_node_output_connect_to_agent_transfer_info_model = {} # DialogNodeOutputConnectToAgentTransferInfo + dialog_node_output_connect_to_agent_transfer_info_model['target'] = {'key1': {'anyKey': 'anyValue'}} + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeConnectToAgent model + runtime_response_generic_runtime_response_type_connect_to_agent_model_json = {} + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['response_type'] = 'connect_to_agent' + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['message_to_human_agent'] = 'testString' + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['agent_available'] = agent_availability_message_model + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['agent_unavailable'] = agent_availability_message_model + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['transfer_info'] = dialog_node_output_connect_to_agent_transfer_info_model + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['topic'] = 'testString' + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['dialog_node'] = 'testString' + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeConnectToAgent by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_connect_to_agent_model = RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.from_dict(runtime_response_generic_runtime_response_type_connect_to_agent_model_json) + assert runtime_response_generic_runtime_response_type_connect_to_agent_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeConnectToAgent by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_connect_to_agent_model_dict = RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.from_dict(runtime_response_generic_runtime_response_type_connect_to_agent_model_json).__dict__ + runtime_response_generic_runtime_response_type_connect_to_agent_model2 = RuntimeResponseGenericRuntimeResponseTypeConnectToAgent(**runtime_response_generic_runtime_response_type_connect_to_agent_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_connect_to_agent_model == runtime_response_generic_runtime_response_type_connect_to_agent_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_connect_to_agent_model_json2 = runtime_response_generic_runtime_response_type_connect_to_agent_model.to_dict() + assert runtime_response_generic_runtime_response_type_connect_to_agent_model_json2 == runtime_response_generic_runtime_response_type_connect_to_agent_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeIframe: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeIframe + """ + + def test_runtime_response_generic_runtime_response_type_iframe_serialization(self): + """ + Test serialization/deserialization for 
RuntimeResponseGenericRuntimeResponseTypeIframe + """ + + # Construct dict forms of any model objects needed in order to build this model. + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeIframe model + runtime_response_generic_runtime_response_type_iframe_model_json = {} + runtime_response_generic_runtime_response_type_iframe_model_json['response_type'] = 'iframe' + runtime_response_generic_runtime_response_type_iframe_model_json['source'] = 'testString' + runtime_response_generic_runtime_response_type_iframe_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_iframe_model_json['description'] = 'testString' + runtime_response_generic_runtime_response_type_iframe_model_json['image_url'] = 'testString' + runtime_response_generic_runtime_response_type_iframe_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeIframe by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_iframe_model = RuntimeResponseGenericRuntimeResponseTypeIframe.from_dict(runtime_response_generic_runtime_response_type_iframe_model_json) + assert runtime_response_generic_runtime_response_type_iframe_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeIframe by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_iframe_model_dict = RuntimeResponseGenericRuntimeResponseTypeIframe.from_dict(runtime_response_generic_runtime_response_type_iframe_model_json).__dict__ + runtime_response_generic_runtime_response_type_iframe_model2 = RuntimeResponseGenericRuntimeResponseTypeIframe(**runtime_response_generic_runtime_response_type_iframe_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_iframe_model == runtime_response_generic_runtime_response_type_iframe_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_iframe_model_json2 = runtime_response_generic_runtime_response_type_iframe_model.to_dict() + assert runtime_response_generic_runtime_response_type_iframe_model_json2 == runtime_response_generic_runtime_response_type_iframe_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeImage: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeImage + """ + + def test_runtime_response_generic_runtime_response_type_image_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeImage + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeImage model + runtime_response_generic_runtime_response_type_image_model_json = {} + runtime_response_generic_runtime_response_type_image_model_json['response_type'] = 'image' + runtime_response_generic_runtime_response_type_image_model_json['source'] = 'testString' + runtime_response_generic_runtime_response_type_image_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_image_model_json['description'] = 'testString' + runtime_response_generic_runtime_response_type_image_model_json['channels'] = [response_generic_channel_model] + runtime_response_generic_runtime_response_type_image_model_json['alt_text'] = 'testString' + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeImage by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_image_model = RuntimeResponseGenericRuntimeResponseTypeImage.from_dict(runtime_response_generic_runtime_response_type_image_model_json) + assert runtime_response_generic_runtime_response_type_image_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeImage by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_image_model_dict = RuntimeResponseGenericRuntimeResponseTypeImage.from_dict(runtime_response_generic_runtime_response_type_image_model_json).__dict__ + runtime_response_generic_runtime_response_type_image_model2 = RuntimeResponseGenericRuntimeResponseTypeImage(**runtime_response_generic_runtime_response_type_image_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_image_model == runtime_response_generic_runtime_response_type_image_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_image_model_json2 = runtime_response_generic_runtime_response_type_image_model.to_dict() + assert runtime_response_generic_runtime_response_type_image_model_json2 == runtime_response_generic_runtime_response_type_image_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeOption: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeOption + """ + + def test_runtime_response_generic_runtime_response_type_option_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeOption + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_input_model = {} # MessageInput + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + dialog_node_output_options_element_value_model = {} # DialogNodeOutputOptionsElementValue + dialog_node_output_options_element_value_model['input'] = message_input_model + dialog_node_output_options_element_value_model['intents'] = [runtime_intent_model] + dialog_node_output_options_element_value_model['entities'] = [runtime_entity_model] + + dialog_node_output_options_element_model = {} # DialogNodeOutputOptionsElement + dialog_node_output_options_element_model['label'] = 'testString' + 
dialog_node_output_options_element_model['value'] = dialog_node_output_options_element_value_model + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeOption model + runtime_response_generic_runtime_response_type_option_model_json = {} + runtime_response_generic_runtime_response_type_option_model_json['response_type'] = 'option' + runtime_response_generic_runtime_response_type_option_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_option_model_json['description'] = 'testString' + runtime_response_generic_runtime_response_type_option_model_json['preference'] = 'dropdown' + runtime_response_generic_runtime_response_type_option_model_json['options'] = [dialog_node_output_options_element_model] + runtime_response_generic_runtime_response_type_option_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeOption by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_option_model = RuntimeResponseGenericRuntimeResponseTypeOption.from_dict(runtime_response_generic_runtime_response_type_option_model_json) + assert runtime_response_generic_runtime_response_type_option_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeOption by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_option_model_dict = RuntimeResponseGenericRuntimeResponseTypeOption.from_dict(runtime_response_generic_runtime_response_type_option_model_json).__dict__ + runtime_response_generic_runtime_response_type_option_model2 = RuntimeResponseGenericRuntimeResponseTypeOption(**runtime_response_generic_runtime_response_type_option_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_option_model == runtime_response_generic_runtime_response_type_option_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_option_model_json2 = runtime_response_generic_runtime_response_type_option_model.to_dict() + assert runtime_response_generic_runtime_response_type_option_model_json2 == runtime_response_generic_runtime_response_type_option_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypePause: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypePause + """ + + def test_runtime_response_generic_runtime_response_type_pause_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypePause + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypePause model + runtime_response_generic_runtime_response_type_pause_model_json = {} + runtime_response_generic_runtime_response_type_pause_model_json['response_type'] = 'pause' + runtime_response_generic_runtime_response_type_pause_model_json['time'] = 38 + runtime_response_generic_runtime_response_type_pause_model_json['typing'] = True + runtime_response_generic_runtime_response_type_pause_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypePause by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_pause_model = RuntimeResponseGenericRuntimeResponseTypePause.from_dict(runtime_response_generic_runtime_response_type_pause_model_json) + assert runtime_response_generic_runtime_response_type_pause_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypePause by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_pause_model_dict = RuntimeResponseGenericRuntimeResponseTypePause.from_dict(runtime_response_generic_runtime_response_type_pause_model_json).__dict__ + runtime_response_generic_runtime_response_type_pause_model2 = RuntimeResponseGenericRuntimeResponseTypePause(**runtime_response_generic_runtime_response_type_pause_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_pause_model == runtime_response_generic_runtime_response_type_pause_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_pause_model_json2 = runtime_response_generic_runtime_response_type_pause_model.to_dict() + assert runtime_response_generic_runtime_response_type_pause_model_json2 == runtime_response_generic_runtime_response_type_pause_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeSuggestion: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeSuggestion + """ + + def test_runtime_response_generic_runtime_response_type_suggestion_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeSuggestion + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_input_model = {} # MessageInput + message_input_model['text'] = 'testString' + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + + dialog_suggestion_value_model = {} # DialogSuggestionValue + dialog_suggestion_value_model['input'] = message_input_model + dialog_suggestion_value_model['intents'] = [runtime_intent_model] + dialog_suggestion_value_model['entities'] = [runtime_entity_model] + + dialog_suggestion_model = {} # DialogSuggestion + dialog_suggestion_model['label'] = 'testString' + dialog_suggestion_model['value'] = dialog_suggestion_value_model + dialog_suggestion_model['output'] = {'anyKey': 'anyValue'} + 
dialog_suggestion_model['dialog_node'] = 'testString' + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeSuggestion model + runtime_response_generic_runtime_response_type_suggestion_model_json = {} + runtime_response_generic_runtime_response_type_suggestion_model_json['response_type'] = 'suggestion' + runtime_response_generic_runtime_response_type_suggestion_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_suggestion_model_json['suggestions'] = [dialog_suggestion_model] + runtime_response_generic_runtime_response_type_suggestion_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeSuggestion by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_suggestion_model = RuntimeResponseGenericRuntimeResponseTypeSuggestion.from_dict(runtime_response_generic_runtime_response_type_suggestion_model_json) + assert runtime_response_generic_runtime_response_type_suggestion_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeSuggestion by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_suggestion_model_dict = RuntimeResponseGenericRuntimeResponseTypeSuggestion.from_dict(runtime_response_generic_runtime_response_type_suggestion_model_json).__dict__ + runtime_response_generic_runtime_response_type_suggestion_model2 = RuntimeResponseGenericRuntimeResponseTypeSuggestion(**runtime_response_generic_runtime_response_type_suggestion_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_suggestion_model == runtime_response_generic_runtime_response_type_suggestion_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_suggestion_model_json2 = runtime_response_generic_runtime_response_type_suggestion_model.to_dict() + assert runtime_response_generic_runtime_response_type_suggestion_model_json2 == runtime_response_generic_runtime_response_type_suggestion_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeText: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeText + """ + + def test_runtime_response_generic_runtime_response_type_text_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeText + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeText model + runtime_response_generic_runtime_response_type_text_model_json = {} + runtime_response_generic_runtime_response_type_text_model_json['response_type'] = 'text' + runtime_response_generic_runtime_response_type_text_model_json['text'] = 'testString' + runtime_response_generic_runtime_response_type_text_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeText by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_text_model = RuntimeResponseGenericRuntimeResponseTypeText.from_dict(runtime_response_generic_runtime_response_type_text_model_json) + assert runtime_response_generic_runtime_response_type_text_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeText by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_text_model_dict = RuntimeResponseGenericRuntimeResponseTypeText.from_dict(runtime_response_generic_runtime_response_type_text_model_json).__dict__ + runtime_response_generic_runtime_response_type_text_model2 = RuntimeResponseGenericRuntimeResponseTypeText(**runtime_response_generic_runtime_response_type_text_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_text_model == runtime_response_generic_runtime_response_type_text_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_text_model_json2 = runtime_response_generic_runtime_response_type_text_model.to_dict() + assert runtime_response_generic_runtime_response_type_text_model_json2 == runtime_response_generic_runtime_response_type_text_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeUserDefined: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeUserDefined + """ + + def test_runtime_response_generic_runtime_response_type_user_defined_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeUserDefined + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeUserDefined model + runtime_response_generic_runtime_response_type_user_defined_model_json = {} + runtime_response_generic_runtime_response_type_user_defined_model_json['response_type'] = 'user_defined' + runtime_response_generic_runtime_response_type_user_defined_model_json['user_defined'] = {'anyKey': 'anyValue'} + runtime_response_generic_runtime_response_type_user_defined_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeUserDefined by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_user_defined_model = RuntimeResponseGenericRuntimeResponseTypeUserDefined.from_dict(runtime_response_generic_runtime_response_type_user_defined_model_json) + assert runtime_response_generic_runtime_response_type_user_defined_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeUserDefined by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_user_defined_model_dict = RuntimeResponseGenericRuntimeResponseTypeUserDefined.from_dict(runtime_response_generic_runtime_response_type_user_defined_model_json).__dict__ + runtime_response_generic_runtime_response_type_user_defined_model2 = RuntimeResponseGenericRuntimeResponseTypeUserDefined(**runtime_response_generic_runtime_response_type_user_defined_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_user_defined_model == runtime_response_generic_runtime_response_type_user_defined_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_user_defined_model_json2 = runtime_response_generic_runtime_response_type_user_defined_model.to_dict() + assert runtime_response_generic_runtime_response_type_user_defined_model_json2 == runtime_response_generic_runtime_response_type_user_defined_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeVideo: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeVideo + """ + + def test_runtime_response_generic_runtime_response_type_video_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeVideo + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'chat' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeVideo model + runtime_response_generic_runtime_response_type_video_model_json = {} + runtime_response_generic_runtime_response_type_video_model_json['response_type'] = 'video' + runtime_response_generic_runtime_response_type_video_model_json['source'] = 'testString' + runtime_response_generic_runtime_response_type_video_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_video_model_json['description'] = 'testString' + runtime_response_generic_runtime_response_type_video_model_json['channels'] = [response_generic_channel_model] + runtime_response_generic_runtime_response_type_video_model_json['channel_options'] = {'anyKey': 'anyValue'} + runtime_response_generic_runtime_response_type_video_model_json['alt_text'] = 'testString' + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeVideo by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_video_model = RuntimeResponseGenericRuntimeResponseTypeVideo.from_dict(runtime_response_generic_runtime_response_type_video_model_json) + assert runtime_response_generic_runtime_response_type_video_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeVideo by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_video_model_dict = RuntimeResponseGenericRuntimeResponseTypeVideo.from_dict(runtime_response_generic_runtime_response_type_video_model_json).__dict__ + runtime_response_generic_runtime_response_type_video_model2 = RuntimeResponseGenericRuntimeResponseTypeVideo(**runtime_response_generic_runtime_response_type_video_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_video_model == runtime_response_generic_runtime_response_type_video_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_video_model_json2 = runtime_response_generic_runtime_response_type_video_model.to_dict() + assert runtime_response_generic_runtime_response_type_video_model_json2 == runtime_response_generic_runtime_response_type_video_model_json + + +# endregion +############################################################################## +# End of Model Tests +############################################################################## diff --git a/test/unit/test_assistant_v2.py b/test/unit/test_assistant_v2.py index 9931c0071..f2ab84821 100644 --- a/test/unit/test_assistant_v2.py +++ b/test/unit/test_assistant_v2.py @@ -1,84 +1,16831 @@ -# coding: utf-8 +# -*- coding: utf-8 -*- +# (C) Copyright IBM Corp. 2019, 2026. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
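+
+# The tests below register mock HTTP responses with the `responses` library and
+# construct the service client with a NoAuthAuthenticator, so they run entirely
+# offline against the mocked endpoints rather than a live Watson Assistant instance.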
+ +""" +Unit Tests for AssistantV2 +""" + +from datetime import datetime, timezone +from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator +from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime +import inspect +import io import json +import pytest +import re +import requests import responses -import ibm_watson - -platform_url = 'https://gateway.watsonplatform.net' -service_path = '/assistant/api' -base_url = '{0}{1}'.format(platform_url, service_path) - -@responses.activate -def test_create_session(): - endpoint = '/v2/assistants/{0}/sessions'.format('bogus_id') - url = '{0}{1}'.format(base_url, endpoint) - response = {'session_id': 'session_id'} - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV2( - username='username', password='password', version='2017-02-03') - session = service.create_session('bogus_id').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert session == response - - -@responses.activate -def test_delete_session(): - endpoint = '/v2/assistants/{0}/sessions/{1}'.format('bogus_id', - 'session_id') - url = '{0}{1}'.format(base_url, endpoint) - response = {} - responses.add( - responses.DELETE, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV2( - username='username', password='password', version='2017-02-03') - delete_session = service.delete_session('bogus_id', - 'session_id').get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert delete_session == response - - -@responses.activate -def test_message(): - endpoint = '/v2/assistants/{0}/sessions/{1}/message'.format( - 'bogus_id', 'session_id') - url = '{0}{1}'.format(base_url, endpoint) - response = { - 'output': { - 'generic': [{ - 'text': - 'I did not understand that. I can help you get pizza, tell a joke or find a movie.', - 'response_type': - 'text' - }], - 'entities': [], - 'intents': [{ - 'confidence': 0.8521236419677736, - 'intent': 'Weather' - }] - } - } - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - service = ibm_watson.AssistantV2( - username='username', password='password', version='2017-02-03') - message = service.message( - 'bogus_id', 'session_id', input={ - 'text': 'What\'s the weather like?' - }).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert message == response +import tempfile +import urllib +from ibm_watson.assistant_v2 import * + +version = 'testString' + +_service = AssistantV2( + authenticator=NoAuthAuthenticator(), + version=version, +) + +_base_url = 'https://api.us-south.assistant.watson.cloud.ibm.com' +_service.set_service_url(_base_url) + + +def preprocess_url(operation_path: str): + """ + Returns the request url associated with the specified operation path. + This will be base_url concatenated with a quoted version of operation_path. + The returned request URL is used to register the mock response so it needs + to match the request URL that is formed by the requests library. + """ + + # Form the request URL from the base URL and operation path. + request_url = _base_url + operation_path + + # If the request url does NOT end with a /, then just return it as-is. 
+ # Otherwise, return a regular expression that matches one or more trailing /. + if not request_url.endswith('/'): + return request_url + return re.compile(request_url.rstrip('/') + '/+') + + +############################################################################## +# Start of Service: ConversationalSkillProviders +############################################################################## +# region + + +class TestCreateProvider: + """ + Test Class for create_provider + """ + + @responses.activate + def test_create_provider_all_params(self): + """ + create_provider() + """ + # Set up mock + url = preprocess_url('/v2/providers') + mock_response = '{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a ProviderSpecificationServersItem model + provider_specification_servers_item_model = {} + provider_specification_servers_item_model['url'] = 'testString' + + # Construct a dict representation of a ProviderAuthenticationTypeAndValue model + provider_authentication_type_and_value_model = {} + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemesBasic model + provider_specification_components_security_schemes_basic_model = {} + provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + # Construct a dict representation of a ProviderAuthenticationOAuth2PasswordUsername model + provider_authentication_o_auth2_password_username_model = {} + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + # Construct a dict representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password model + provider_authentication_o_auth2_flows_model = {} + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + # Construct a dict representation of a ProviderAuthenticationOAuth2 model + provider_authentication_o_auth2_model = {} + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemes model + provider_specification_components_security_schemes_model = {} + provider_specification_components_security_schemes_model['authentication_method'] = 'basic' + 
provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model + provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model + + # Construct a dict representation of a ProviderSpecificationComponents model + provider_specification_components_model = {} + provider_specification_components_model['securitySchemes'] = provider_specification_components_security_schemes_model + + # Construct a dict representation of a ProviderSpecification model + provider_specification_model = {} + provider_specification_model['servers'] = [provider_specification_servers_item_model] + provider_specification_model['components'] = provider_specification_components_model + + # Construct a dict representation of a ProviderPrivateAuthenticationBearerFlow model + provider_private_authentication_model = {} + provider_private_authentication_model['token'] = provider_authentication_type_and_value_model + + # Construct a dict representation of a ProviderPrivate model + provider_private_model = {} + provider_private_model['authentication'] = provider_private_authentication_model + + # Set up parameter values + provider_id = 'testString' + specification = provider_specification_model + private = provider_private_model + + # Invoke method + response = _service.create_provider( + provider_id, + specification, + private, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['provider_id'] == 'testString' + assert req_body['specification'] == provider_specification_model + assert req_body['private'] == provider_private_model + + def test_create_provider_all_params_with_retries(self): + # Enable retries and run test_create_provider_all_params. + _service.enable_retries() + self.test_create_provider_all_params() + + # Disable retries and run test_create_provider_all_params. 
+ _service.disable_retries() + self.test_create_provider_all_params() + + @responses.activate + def test_create_provider_value_error(self): + """ + test_create_provider_value_error() + """ + # Set up mock + url = preprocess_url('/v2/providers') + mock_response = '{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a ProviderSpecificationServersItem model + provider_specification_servers_item_model = {} + provider_specification_servers_item_model['url'] = 'testString' + + # Construct a dict representation of a ProviderAuthenticationTypeAndValue model + provider_authentication_type_and_value_model = {} + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemesBasic model + provider_specification_components_security_schemes_basic_model = {} + provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + # Construct a dict representation of a ProviderAuthenticationOAuth2PasswordUsername model + provider_authentication_o_auth2_password_username_model = {} + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + # Construct a dict representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password model + provider_authentication_o_auth2_flows_model = {} + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + # Construct a dict representation of a ProviderAuthenticationOAuth2 model + provider_authentication_o_auth2_model = {} + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemes model + provider_specification_components_security_schemes_model = {} + provider_specification_components_security_schemes_model['authentication_method'] = 'basic' + provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model + provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model + + # Construct a dict representation of a ProviderSpecificationComponents model + provider_specification_components_model = {} + 
provider_specification_components_model['securitySchemes'] = provider_specification_components_security_schemes_model + + # Construct a dict representation of a ProviderSpecification model + provider_specification_model = {} + provider_specification_model['servers'] = [provider_specification_servers_item_model] + provider_specification_model['components'] = provider_specification_components_model + + # Construct a dict representation of a ProviderPrivateAuthenticationBearerFlow model + provider_private_authentication_model = {} + provider_private_authentication_model['token'] = provider_authentication_type_and_value_model + + # Construct a dict representation of a ProviderPrivate model + provider_private_model = {} + provider_private_model['authentication'] = provider_private_authentication_model + + # Set up parameter values + provider_id = 'testString' + specification = provider_specification_model + private = provider_private_model + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "provider_id": provider_id, + "specification": specification, + "private": private, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_provider(**req_copy) + + def test_create_provider_value_error_with_retries(self): + # Enable retries and run test_create_provider_value_error. + _service.enable_retries() + self.test_create_provider_value_error() + + # Disable retries and run test_create_provider_value_error. + _service.disable_retries() + self.test_create_provider_value_error() + + +class TestListProviders: + """ + Test Class for list_providers + """ + + @responses.activate + def test_list_providers_all_params(self): + """ + list_providers() + """ + # Set up mock + url = preprocess_url('/v2/providers') + mock_response = '{"conversational_skill_providers": [{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + page_limit = 100 + include_count = False + sort = 'name' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_providers( + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 
'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_providers_all_params_with_retries(self): + # Enable retries and run test_list_providers_all_params. + _service.enable_retries() + self.test_list_providers_all_params() + + # Disable retries and run test_list_providers_all_params. + _service.disable_retries() + self.test_list_providers_all_params() + + @responses.activate + def test_list_providers_required_params(self): + """ + test_list_providers_required_params() + """ + # Set up mock + url = preprocess_url('/v2/providers') + mock_response = '{"conversational_skill_providers": [{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_providers() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_providers_required_params_with_retries(self): + # Enable retries and run test_list_providers_required_params. + _service.enable_retries() + self.test_list_providers_required_params() + + # Disable retries and run test_list_providers_required_params. + _service.disable_retries() + self.test_list_providers_required_params() + + @responses.activate + def test_list_providers_value_error(self): + """ + test_list_providers_value_error() + """ + # Set up mock + url = preprocess_url('/v2/providers') + mock_response = '{"conversational_skill_providers": [{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_providers(**req_copy) + + def test_list_providers_value_error_with_retries(self): + # Enable retries and run test_list_providers_value_error. + _service.enable_retries() + self.test_list_providers_value_error() + + # Disable retries and run test_list_providers_value_error. 
+ _service.disable_retries() + self.test_list_providers_value_error() + + +class TestUpdateProvider: + """ + Test Class for update_provider + """ + + @responses.activate + def test_update_provider_all_params(self): + """ + update_provider() + """ + # Set up mock + url = preprocess_url('/v2/providers/testString') + mock_response = '{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a ProviderSpecificationServersItem model + provider_specification_servers_item_model = {} + provider_specification_servers_item_model['url'] = 'testString' + + # Construct a dict representation of a ProviderAuthenticationTypeAndValue model + provider_authentication_type_and_value_model = {} + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemesBasic model + provider_specification_components_security_schemes_basic_model = {} + provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + # Construct a dict representation of a ProviderAuthenticationOAuth2PasswordUsername model + provider_authentication_o_auth2_password_username_model = {} + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + # Construct a dict representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password model + provider_authentication_o_auth2_flows_model = {} + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + # Construct a dict representation of a ProviderAuthenticationOAuth2 model + provider_authentication_o_auth2_model = {} + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemes model + provider_specification_components_security_schemes_model = {} + provider_specification_components_security_schemes_model['authentication_method'] = 'basic' + provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model + provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model + + # Construct a dict representation of a ProviderSpecificationComponents model + 
provider_specification_components_model = {} + provider_specification_components_model['securitySchemes'] = provider_specification_components_security_schemes_model + + # Construct a dict representation of a ProviderSpecification model + provider_specification_model = {} + provider_specification_model['servers'] = [provider_specification_servers_item_model] + provider_specification_model['components'] = provider_specification_components_model + + # Construct a dict representation of a ProviderPrivateAuthenticationBearerFlow model + provider_private_authentication_model = {} + provider_private_authentication_model['token'] = provider_authentication_type_and_value_model + + # Construct a dict representation of a ProviderPrivate model + provider_private_model = {} + provider_private_model['authentication'] = provider_private_authentication_model + + # Set up parameter values + provider_id = 'testString' + specification = provider_specification_model + private = provider_private_model + + # Invoke method + response = _service.update_provider( + provider_id, + specification, + private, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['specification'] == provider_specification_model + assert req_body['private'] == provider_private_model + + def test_update_provider_all_params_with_retries(self): + # Enable retries and run test_update_provider_all_params. + _service.enable_retries() + self.test_update_provider_all_params() + + # Disable retries and run test_update_provider_all_params. + _service.disable_retries() + self.test_update_provider_all_params() + + @responses.activate + def test_update_provider_value_error(self): + """ + test_update_provider_value_error() + """ + # Set up mock + url = preprocess_url('/v2/providers/testString') + mock_response = '{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a ProviderSpecificationServersItem model + provider_specification_servers_item_model = {} + provider_specification_servers_item_model['url'] = 'testString' + + # Construct a dict representation of a ProviderAuthenticationTypeAndValue model + provider_authentication_type_and_value_model = {} + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemesBasic model + provider_specification_components_security_schemes_basic_model = {} + provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + # Construct a dict representation of a ProviderAuthenticationOAuth2PasswordUsername model + provider_authentication_o_auth2_password_username_model = {} + provider_authentication_o_auth2_password_username_model['type'] = 'value' + 
provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + # Construct a dict representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password model + provider_authentication_o_auth2_flows_model = {} + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + # Construct a dict representation of a ProviderAuthenticationOAuth2 model + provider_authentication_o_auth2_model = {} + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemes model + provider_specification_components_security_schemes_model = {} + provider_specification_components_security_schemes_model['authentication_method'] = 'basic' + provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model + provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model + + # Construct a dict representation of a ProviderSpecificationComponents model + provider_specification_components_model = {} + provider_specification_components_model['securitySchemes'] = provider_specification_components_security_schemes_model + + # Construct a dict representation of a ProviderSpecification model + provider_specification_model = {} + provider_specification_model['servers'] = [provider_specification_servers_item_model] + provider_specification_model['components'] = provider_specification_components_model + + # Construct a dict representation of a ProviderPrivateAuthenticationBearerFlow model + provider_private_authentication_model = {} + provider_private_authentication_model['token'] = provider_authentication_type_and_value_model + + # Construct a dict representation of a ProviderPrivate model + provider_private_model = {} + provider_private_model['authentication'] = provider_private_authentication_model + + # Set up parameter values + provider_id = 'testString' + specification = provider_specification_model + private = provider_private_model + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "provider_id": provider_id, + "specification": specification, + "private": private, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_provider(**req_copy) + + def test_update_provider_value_error_with_retries(self): + # Enable retries and run test_update_provider_value_error. + _service.enable_retries() + self.test_update_provider_value_error() + + # Disable retries and run test_update_provider_value_error. 
+ _service.disable_retries() + self.test_update_provider_value_error() + + +# endregion +############################################################################## +# End of Service: ConversationalSkillProviders +############################################################################## + +############################################################################## +# Start of Service: Assistants +############################################################################## +# region + + +class TestCreateAssistant: + """ + Test Class for create_assistant + """ + + @responses.activate + def test_create_assistant_all_params(self): + """ + create_assistant() + """ + # Set up mock + url = preprocess_url('/v2/assistants') + mock_response = '{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + language = 'testString' + name = 'testString' + description = 'testString' + + # Invoke method + response = _service.create_assistant( + language=language, + name=name, + description=description, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['language'] == 'testString' + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + + def test_create_assistant_all_params_with_retries(self): + # Enable retries and run test_create_assistant_all_params. + _service.enable_retries() + self.test_create_assistant_all_params() + + # Disable retries and run test_create_assistant_all_params. + _service.disable_retries() + self.test_create_assistant_all_params() + + @responses.activate + def test_create_assistant_required_params(self): + """ + test_create_assistant_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants') + mock_response = '{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.create_assistant() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_create_assistant_required_params_with_retries(self): + # Enable retries and run test_create_assistant_required_params. + _service.enable_retries() + self.test_create_assistant_required_params() + + # Disable retries and run test_create_assistant_required_params. 
+ _service.disable_retries() + self.test_create_assistant_required_params() + + @responses.activate + def test_create_assistant_value_error(self): + """ + test_create_assistant_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants') + mock_response = '{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_assistant(**req_copy) + + def test_create_assistant_value_error_with_retries(self): + # Enable retries and run test_create_assistant_value_error. + _service.enable_retries() + self.test_create_assistant_value_error() + + # Disable retries and run test_create_assistant_value_error. + _service.disable_retries() + self.test_create_assistant_value_error() + + +class TestListAssistants: + """ + Test Class for list_assistants + """ + + @responses.activate + def test_list_assistants_all_params(self): + """ + list_assistants() + """ + # Set up mock + url = preprocess_url('/v2/assistants') + mock_response = '{"assistants": [{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + page_limit = 100 + include_count = False + sort = 'name' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_assistants( + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_assistants_all_params_with_retries(self): + # Enable retries and run test_list_assistants_all_params. + _service.enable_retries() + self.test_list_assistants_all_params() + + # Disable retries and run test_list_assistants_all_params. 
+ _service.disable_retries() + self.test_list_assistants_all_params() + + @responses.activate + def test_list_assistants_required_params(self): + """ + test_list_assistants_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants') + mock_response = '{"assistants": [{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_assistants() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_assistants_required_params_with_retries(self): + # Enable retries and run test_list_assistants_required_params. + _service.enable_retries() + self.test_list_assistants_required_params() + + # Disable retries and run test_list_assistants_required_params. + _service.disable_retries() + self.test_list_assistants_required_params() + + @responses.activate + def test_list_assistants_value_error(self): + """ + test_list_assistants_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants') + mock_response = '{"assistants": [{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_assistants(**req_copy) + + def test_list_assistants_value_error_with_retries(self): + # Enable retries and run test_list_assistants_value_error. + _service.enable_retries() + self.test_list_assistants_value_error() + + # Disable retries and run test_list_assistants_value_error. + _service.disable_retries() + self.test_list_assistants_value_error() + + +class TestDeleteAssistant: + """ + Test Class for delete_assistant + """ + + @responses.activate + def test_delete_assistant_all_params(self): + """ + delete_assistant() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Invoke method + response = _service.delete_assistant( + assistant_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_assistant_all_params_with_retries(self): + # Enable retries and run test_delete_assistant_all_params. 
+ _service.enable_retries() + self.test_delete_assistant_all_params() + + # Disable retries and run test_delete_assistant_all_params. + _service.disable_retries() + self.test_delete_assistant_all_params() + + @responses.activate + def test_delete_assistant_value_error(self): + """ + test_delete_assistant_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_assistant(**req_copy) + + def test_delete_assistant_value_error_with_retries(self): + # Enable retries and run test_delete_assistant_value_error. + _service.enable_retries() + self.test_delete_assistant_value_error() + + # Disable retries and run test_delete_assistant_value_error. + _service.disable_retries() + self.test_delete_assistant_value_error() + + +# endregion +############################################################################## +# End of Service: Assistants +############################################################################## + +############################################################################## +# Start of Service: Sessions +############################################################################## +# region + + +class TestCreateSession: + """ + Test Class for create_session + """ + + @responses.activate + def test_create_session_all_params(self): + """ + create_session() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/sessions') + mock_response = '{"session_id": "session_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a RequestAnalytics model + request_analytics_model = {} + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + analytics = request_analytics_model + + # Invoke method + response = _service.create_session( + assistant_id, + environment_id, + analytics=analytics, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['analytics'] == request_analytics_model + + def test_create_session_all_params_with_retries(self): + # Enable retries and run test_create_session_all_params. + _service.enable_retries() + self.test_create_session_all_params() + + # Disable retries and run test_create_session_all_params. 
+ _service.disable_retries() + self.test_create_session_all_params() + + @responses.activate + def test_create_session_required_params(self): + """ + test_create_session_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/sessions') + mock_response = '{"session_id": "session_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + + # Invoke method + response = _service.create_session( + assistant_id, + environment_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_create_session_required_params_with_retries(self): + # Enable retries and run test_create_session_required_params. + _service.enable_retries() + self.test_create_session_required_params() + + # Disable retries and run test_create_session_required_params. + _service.disable_retries() + self.test_create_session_required_params() + + @responses.activate + def test_create_session_value_error(self): + """ + test_create_session_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/sessions') + mock_response = '{"session_id": "session_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "environment_id": environment_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_session(**req_copy) + + def test_create_session_value_error_with_retries(self): + # Enable retries and run test_create_session_value_error. + _service.enable_retries() + self.test_create_session_value_error() + + # Disable retries and run test_create_session_value_error. + _service.disable_retries() + self.test_create_session_value_error() + + +class TestDeleteSession: + """ + Test Class for delete_session + """ + + @responses.activate + def test_delete_session_all_params(self): + """ + delete_session() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + session_id = 'testString' + + # Invoke method + response = _service.delete_session( + assistant_id, + environment_id, + session_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_session_all_params_with_retries(self): + # Enable retries and run test_delete_session_all_params. + _service.enable_retries() + self.test_delete_session_all_params() + + # Disable retries and run test_delete_session_all_params. 
+ _service.disable_retries() + self.test_delete_session_all_params() + + @responses.activate + def test_delete_session_value_error(self): + """ + test_delete_session_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + session_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "environment_id": environment_id, + "session_id": session_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_session(**req_copy) + + def test_delete_session_value_error_with_retries(self): + # Enable retries and run test_delete_session_value_error. + _service.enable_retries() + self.test_delete_session_value_error() + + # Disable retries and run test_delete_session_value_error. + _service.disable_retries() + self.test_delete_session_value_error() + + +# endregion +############################################################################## +# End of Service: Sessions +############################################################################## + +############################################################################## +# Start of Service: Message +############################################################################## +# region + + +class TestMessage: + """ + Test Class for message + """ + + @responses.activate + def test_message_all_params(self): + """ + message() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message') + mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, 
"result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id", "masked_output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", 
"source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a RuntimeIntent model + runtime_intent_model = {} + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + # Construct a dict representation of a CaptureGroup model + capture_group_model = {} + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + # Construct a dict representation of a RuntimeEntityInterpretation model + runtime_entity_interpretation_model = {} + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + 
runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + # Construct a dict representation of a RuntimeEntityAlternative model + runtime_entity_alternative_model = {} + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + # Construct a dict representation of a RuntimeEntityRole model + runtime_entity_role_model = {} + runtime_entity_role_model['type'] = 'date_from' + + # Construct a dict representation of a RuntimeEntity model + runtime_entity_model = {} + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + # Construct a dict representation of a MessageInputAttachment model + message_input_attachment_model = {} + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + # Construct a dict representation of a RequestAnalytics model + request_analytics_model = {} + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + # Construct a dict representation of a MessageInputOptionsSpelling model + message_input_options_spelling_model = {} + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + # Construct a dict representation of a MessageInputOptions model + message_input_options_model = {} + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + # Construct a dict representation of a MessageInput model + message_input_model = {} + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + # Construct a dict representation of a MessageContextGlobalSystem model + message_context_global_system_model = {} + message_context_global_system_model['timezone'] = 
'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + # Construct a dict representation of a MessageContextGlobal model + message_context_global_model = {} + message_context_global_model['system'] = message_context_global_system_model + + # Construct a dict representation of a MessageContextSkillSystem model + message_context_skill_system_model = {} + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + # Construct a dict representation of a MessageContextDialogSkill model + message_context_dialog_skill_model = {} + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + # Construct a dict representation of a MessageContextActionSkill model + message_context_action_skill_model = {} + message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + # Construct a dict representation of a MessageContextSkills model + message_context_skills_model = {} + message_context_skills_model['main skill'] = message_context_dialog_skill_model + message_context_skills_model['actions skill'] = message_context_action_skill_model + + # Construct a dict representation of a MessageContext model + message_context_model = {} + message_context_model['global'] = message_context_global_model + message_context_model['skills'] = message_context_skills_model + message_context_model['integrations'] = {'anyKey': 'anyValue'} + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + session_id = 'testString' + input = message_input_model + context = message_context_model + user_id = 'testString' + + # Invoke method + response = _service.message( + assistant_id, + environment_id, + session_id, + input=input, + context=context, + user_id=user_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['input'] == message_input_model + assert req_body['context'] == message_context_model + assert req_body['user_id'] == 'testString' + + def test_message_all_params_with_retries(self): + # Enable retries and run test_message_all_params. + _service.enable_retries() + self.test_message_all_params() + + # Disable retries and run test_message_all_params. 
+ _service.disable_retries() + self.test_message_all_params() + + @responses.activate + def test_message_required_params(self): + """ + test_message_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message') + mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id", "masked_output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": 
"citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": 
"media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + session_id = 'testString' + + # Invoke method + response = _service.message( + assistant_id, + environment_id, + session_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_message_required_params_with_retries(self): + # Enable retries and run test_message_required_params. + _service.enable_retries() + self.test_message_required_params() + + # Disable retries and run test_message_required_params. + _service.disable_retries() + self.test_message_required_params() + + @responses.activate + def test_message_value_error(self): + """ + test_message_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message') + mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": 
"user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id", "masked_output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": 
[{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + session_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "environment_id": environment_id, + "session_id": session_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.message(**req_copy) + + def test_message_value_error_with_retries(self): + # Enable retries and run test_message_value_error. + _service.enable_retries() + self.test_message_value_error() + + # Disable retries and run test_message_value_error. 
+ _service.disable_retries() + self.test_message_value_error() + + +class TestMessageStateless: + """ + Test Class for message_stateless + """ + + @responses.activate + def test_message_stateless_all_params(self): + """ + message_stateless() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/message') + mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}, "private_action_variables": {"anyKey": "anyValue"}, "private_skill_variables": {"anyKey": "anyValue"}}}, "integrations": 
{"anyKey": "anyValue"}}, "masked_output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": 
{"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "user_id": "user_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a RuntimeIntent model + runtime_intent_model = {} + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + # Construct a dict representation of a CaptureGroup model + capture_group_model = {} + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + # Construct a dict representation of a RuntimeEntityInterpretation model + runtime_entity_interpretation_model = {} + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + # Construct a dict representation of a RuntimeEntityAlternative model + runtime_entity_alternative_model = {} + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + # Construct a dict representation of a RuntimeEntityRole model + runtime_entity_role_model = {} + runtime_entity_role_model['type'] = 'date_from' + + # Construct a dict representation of a RuntimeEntity model + runtime_entity_model = {} + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model 
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + # Construct a dict representation of a MessageInputAttachment model + message_input_attachment_model = {} + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + # Construct a dict representation of a RequestAnalytics model + request_analytics_model = {} + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + # Construct a dict representation of a MessageInputOptionsSpelling model + message_input_options_spelling_model = {} + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + # Construct a dict representation of a StatelessMessageInputOptions model + stateless_message_input_options_model = {} + stateless_message_input_options_model['restart'] = False + stateless_message_input_options_model['alternate_intents'] = False + stateless_message_input_options_model['async_callout'] = False + stateless_message_input_options_model['spelling'] = message_input_options_spelling_model + stateless_message_input_options_model['debug'] = False + + # Construct a dict representation of a StatelessMessageInput model + stateless_message_input_model = {} + stateless_message_input_model['message_type'] = 'text' + stateless_message_input_model['text'] = 'testString' + stateless_message_input_model['intents'] = [runtime_intent_model] + stateless_message_input_model['entities'] = [runtime_entity_model] + stateless_message_input_model['suggestion_id'] = 'testString' + stateless_message_input_model['attachments'] = [message_input_attachment_model] + stateless_message_input_model['analytics'] = request_analytics_model + stateless_message_input_model['options'] = stateless_message_input_options_model + + # Construct a dict representation of a MessageContextGlobalSystem model + message_context_global_system_model = {} + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + # Construct a dict representation of a StatelessMessageContextGlobal model + stateless_message_context_global_model = {} + stateless_message_context_global_model['system'] = message_context_global_system_model + stateless_message_context_global_model['session_id'] = 'testString' + + # Construct a dict representation of a MessageContextSkillSystem model + message_context_skill_system_model = {} + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + # Construct a dict representation of a MessageContextDialogSkill model + message_context_dialog_skill_model = {} + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + # Construct a dict representation of a StatelessMessageContextSkillsActionsSkill model + 
stateless_message_context_skills_actions_skill_model = {} + stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model + stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'} + + # Construct a dict representation of a StatelessMessageContextSkills model + stateless_message_context_skills_model = {} + stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model + stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model + + # Construct a dict representation of a StatelessMessageContext model + stateless_message_context_model = {} + stateless_message_context_model['global'] = stateless_message_context_global_model + stateless_message_context_model['skills'] = stateless_message_context_skills_model + stateless_message_context_model['integrations'] = {'anyKey': 'anyValue'} + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + input = stateless_message_input_model + context = stateless_message_context_model + user_id = 'testString' + + # Invoke method + response = _service.message_stateless( + assistant_id, + environment_id, + input=input, + context=context, + user_id=user_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['input'] == stateless_message_input_model + assert req_body['context'] == stateless_message_context_model + assert req_body['user_id'] == 'testString' + + def test_message_stateless_all_params_with_retries(self): + # Enable retries and run test_message_stateless_all_params. + _service.enable_retries() + self.test_message_stateless_all_params() + + # Disable retries and run test_message_stateless_all_params. 
+ _service.disable_retries() + self.test_message_stateless_all_params() + + @responses.activate + def test_message_stateless_required_params(self): + """ + test_message_stateless_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/message') + mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}, "private_action_variables": {"anyKey": "anyValue"}, "private_skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "masked_output": 
{"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], 
"suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "user_id": "user_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + + # Invoke method + response = _service.message_stateless( + assistant_id, + environment_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_message_stateless_required_params_with_retries(self): + # Enable retries and run test_message_stateless_required_params. + _service.enable_retries() + self.test_message_stateless_required_params() + + # Disable retries and run test_message_stateless_required_params. + _service.disable_retries() + self.test_message_stateless_required_params() + + @responses.activate + def test_message_stateless_value_error(self): + """ + test_message_stateless_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/message') + mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", 
"action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}, "private_action_variables": {"anyKey": "anyValue"}, "private_skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "masked_output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, 
"llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "user_id": "user_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "environment_id": environment_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.message_stateless(**req_copy) + + def test_message_stateless_value_error_with_retries(self): + # Enable retries and run test_message_stateless_value_error. + _service.enable_retries() + self.test_message_stateless_value_error() + + # Disable retries and run test_message_stateless_value_error. + _service.disable_retries() + self.test_message_stateless_value_error() + + +# endregion +############################################################################## +# End of Service: Message +############################################################################## + +############################################################################## +# Start of Service: MessageStream +############################################################################## +# region + + +class TestMessageStream: + """ + Test Class for message_stream + """ + + @responses.activate + def test_message_stream_all_params(self): + """ + message_stream() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message_stream') + mock_response = 'This is a mock binary response.' 
+ responses.add( + responses.POST, + url, + body=mock_response, + content_type='text/event-stream', + status=200, + ) + + # Construct a dict representation of a RuntimeIntent model + runtime_intent_model = {} + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + # Construct a dict representation of a CaptureGroup model + capture_group_model = {} + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + # Construct a dict representation of a RuntimeEntityInterpretation model + runtime_entity_interpretation_model = {} + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + # Construct a dict representation of a RuntimeEntityAlternative model + runtime_entity_alternative_model = {} + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + # Construct a dict representation of a RuntimeEntityRole model + runtime_entity_role_model = {} + runtime_entity_role_model['type'] = 'date_from' + + # Construct a dict representation of a RuntimeEntity model + runtime_entity_model = {} + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + # Construct a dict representation of a MessageInputAttachment model + message_input_attachment_model = {} + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + # Construct a dict 
representation of a RequestAnalytics model + request_analytics_model = {} + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + # Construct a dict representation of a MessageInputOptionsSpelling model + message_input_options_spelling_model = {} + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + # Construct a dict representation of a MessageInputOptions model + message_input_options_model = {} + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + # Construct a dict representation of a MessageInput model + message_input_model = {} + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + # Construct a dict representation of a MessageContextGlobalSystem model + message_context_global_system_model = {} + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + # Construct a dict representation of a MessageContextGlobal model + message_context_global_model = {} + message_context_global_model['system'] = message_context_global_system_model + + # Construct a dict representation of a MessageContextSkillSystem model + message_context_skill_system_model = {} + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + # Construct a dict representation of a MessageContextDialogSkill model + message_context_dialog_skill_model = {} + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + # Construct a dict representation of a MessageContextActionSkill model + message_context_action_skill_model = {} + message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + # Construct a dict representation of a MessageContextSkills model + message_context_skills_model = {} + message_context_skills_model['main skill'] = message_context_dialog_skill_model + message_context_skills_model['actions skill'] = 
message_context_action_skill_model + + # Construct a dict representation of a MessageContext model + message_context_model = {} + message_context_model['global'] = message_context_global_model + message_context_model['skills'] = message_context_skills_model + message_context_model['integrations'] = {'anyKey': 'anyValue'} + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + session_id = 'testString' + input = message_input_model + context = message_context_model + user_id = 'testString' + + # Invoke method + response = _service.message_stream( + assistant_id, + environment_id, + session_id, + input=input, + context=context, + user_id=user_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['input'] == message_input_model + assert req_body['context'] == message_context_model + assert req_body['user_id'] == 'testString' + + def test_message_stream_all_params_with_retries(self): + # Enable retries and run test_message_stream_all_params. + _service.enable_retries() + self.test_message_stream_all_params() + + # Disable retries and run test_message_stream_all_params. + _service.disable_retries() + self.test_message_stream_all_params() + + @responses.activate + def test_message_stream_required_params(self): + """ + test_message_stream_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message_stream') + mock_response = 'This is a mock binary response.' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='text/event-stream', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + session_id = 'testString' + + # Invoke method + response = _service.message_stream( + assistant_id, + environment_id, + session_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_message_stream_required_params_with_retries(self): + # Enable retries and run test_message_stream_required_params. + _service.enable_retries() + self.test_message_stream_required_params() + + # Disable retries and run test_message_stream_required_params. + _service.disable_retries() + self.test_message_stream_required_params() + + @responses.activate + def test_message_stream_value_error(self): + """ + test_message_stream_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message_stream') + mock_response = 'This is a mock binary response.' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='text/event-stream', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + session_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "environment_id": environment_id, + "session_id": session_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.message_stream(**req_copy) + + def test_message_stream_value_error_with_retries(self): + # Enable retries and run test_message_stream_value_error. 
+ _service.enable_retries() + self.test_message_stream_value_error() + + # Disable retries and run test_message_stream_value_error. + _service.disable_retries() + self.test_message_stream_value_error() + + +class TestMessageStreamStateless: + """ + Test Class for message_stream_stateless + """ + + @responses.activate + def test_message_stream_stateless_all_params(self): + """ + message_stream_stateless() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/message_stream') + mock_response = 'This is a mock binary response.' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='text/event-stream', + status=200, + ) + + # Construct a dict representation of a RuntimeIntent model + runtime_intent_model = {} + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + # Construct a dict representation of a CaptureGroup model + capture_group_model = {} + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + # Construct a dict representation of a RuntimeEntityInterpretation model + runtime_entity_interpretation_model = {} + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + # Construct a dict representation of a RuntimeEntityAlternative model + runtime_entity_alternative_model = {} + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + # Construct a dict representation of a RuntimeEntityRole model + runtime_entity_role_model = {} + runtime_entity_role_model['type'] = 'date_from' + + # Construct a dict representation of a RuntimeEntity model + runtime_entity_model = {} + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + 
runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + # Construct a dict representation of a MessageInputAttachment model + message_input_attachment_model = {} + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + # Construct a dict representation of a RequestAnalytics model + request_analytics_model = {} + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + # Construct a dict representation of a MessageInputOptionsSpelling model + message_input_options_spelling_model = {} + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + # Construct a dict representation of a MessageInputOptions model + message_input_options_model = {} + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + # Construct a dict representation of a MessageInput model + message_input_model = {} + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + # Construct a dict representation of a MessageContextGlobalSystem model + message_context_global_system_model = {} + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + # Construct a dict representation of a MessageContextGlobal model + message_context_global_model = {} + message_context_global_model['system'] = message_context_global_system_model + + # Construct a dict representation of a MessageContextSkillSystem model + message_context_skill_system_model = {} + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + # Construct a dict representation of a MessageContextDialogSkill model + message_context_dialog_skill_model = {} + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + # Construct a dict representation of a MessageContextActionSkill model + message_context_action_skill_model = {} + 
message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + # Construct a dict representation of a MessageContextSkills model + message_context_skills_model = {} + message_context_skills_model['main skill'] = message_context_dialog_skill_model + message_context_skills_model['actions skill'] = message_context_action_skill_model + + # Construct a dict representation of a MessageContext model + message_context_model = {} + message_context_model['global'] = message_context_global_model + message_context_model['skills'] = message_context_skills_model + message_context_model['integrations'] = {'anyKey': 'anyValue'} + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + input = message_input_model + context = message_context_model + user_id = 'testString' + + # Invoke method + response = _service.message_stream_stateless( + assistant_id, + environment_id, + input=input, + context=context, + user_id=user_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['input'] == message_input_model + assert req_body['context'] == message_context_model + assert req_body['user_id'] == 'testString' + + def test_message_stream_stateless_all_params_with_retries(self): + # Enable retries and run test_message_stream_stateless_all_params. + _service.enable_retries() + self.test_message_stream_stateless_all_params() + + # Disable retries and run test_message_stream_stateless_all_params. + _service.disable_retries() + self.test_message_stream_stateless_all_params() + + @responses.activate + def test_message_stream_stateless_required_params(self): + """ + test_message_stream_stateless_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/message_stream') + mock_response = 'This is a mock binary response.' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='text/event-stream', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + + # Invoke method + response = _service.message_stream_stateless( + assistant_id, + environment_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_message_stream_stateless_required_params_with_retries(self): + # Enable retries and run test_message_stream_stateless_required_params. + _service.enable_retries() + self.test_message_stream_stateless_required_params() + + # Disable retries and run test_message_stream_stateless_required_params. + _service.disable_retries() + self.test_message_stream_stateless_required_params() + + @responses.activate + def test_message_stream_stateless_value_error(self): + """ + test_message_stream_stateless_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString/message_stream') + mock_response = 'This is a mock binary response.' 
+ responses.add( + responses.POST, + url, + body=mock_response, + content_type='text/event-stream', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "environment_id": environment_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.message_stream_stateless(**req_copy) + + def test_message_stream_stateless_value_error_with_retries(self): + # Enable retries and run test_message_stream_stateless_value_error. + _service.enable_retries() + self.test_message_stream_stateless_value_error() + + # Disable retries and run test_message_stream_stateless_value_error. + _service.disable_retries() + self.test_message_stream_stateless_value_error() + + +# endregion +############################################################################## +# End of Service: MessageStream +############################################################################## + +############################################################################## +# Start of Service: BulkClassify +############################################################################## +# region + + +class TestBulkClassify: + """ + Test Class for bulk_classify + """ + + @responses.activate + def test_bulk_classify_all_params(self): + """ + bulk_classify() + """ + # Set up mock + url = preprocess_url('/v2/skills/testString/workspace/bulk_classify') + mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}]}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a BulkClassifyUtterance model + bulk_classify_utterance_model = {} + bulk_classify_utterance_model['text'] = 'testString' + + # Set up parameter values + skill_id = 'testString' + input = [bulk_classify_utterance_model] + + # Invoke method + response = _service.bulk_classify( + skill_id, + input, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['input'] == [bulk_classify_utterance_model] + + def test_bulk_classify_all_params_with_retries(self): + # Enable retries and run test_bulk_classify_all_params. 
+ _service.enable_retries() + self.test_bulk_classify_all_params() + + # Disable retries and run test_bulk_classify_all_params. + _service.disable_retries() + self.test_bulk_classify_all_params() + + @responses.activate + def test_bulk_classify_value_error(self): + """ + test_bulk_classify_value_error() + """ + # Set up mock + url = preprocess_url('/v2/skills/testString/workspace/bulk_classify') + mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}]}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a BulkClassifyUtterance model + bulk_classify_utterance_model = {} + bulk_classify_utterance_model['text'] = 'testString' + + # Set up parameter values + skill_id = 'testString' + input = [bulk_classify_utterance_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "skill_id": skill_id, + "input": input, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.bulk_classify(**req_copy) + + def test_bulk_classify_value_error_with_retries(self): + # Enable retries and run test_bulk_classify_value_error. + _service.enable_retries() + self.test_bulk_classify_value_error() + + # Disable retries and run test_bulk_classify_value_error. 
+ _service.disable_retries() + self.test_bulk_classify_value_error() + + +# endregion +############################################################################## +# End of Service: BulkClassify +############################################################################## + +############################################################################## +# Start of Service: Logs +############################################################################## +# region + + +class TestListLogs: + """ + Test Class for list_logs + """ + + @responses.activate + def test_list_logs_all_params(self): + """ + list_logs() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/logs') + mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": 
"group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + sort = 'testString' + filter = 'testString' + page_limit = 100 + cursor = 'testString' + + # Invoke method + response = _service.list_logs( + assistant_id, + sort=sort, + filter=filter, + page_limit=page_limit, + cursor=cursor, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'sort={}'.format(sort) in query_string + assert 'filter={}'.format(filter) in query_string + assert 'page_limit={}'.format(page_limit) in query_string + assert 
'cursor={}'.format(cursor) in query_string + + def test_list_logs_all_params_with_retries(self): + # Enable retries and run test_list_logs_all_params. + _service.enable_retries() + self.test_list_logs_all_params() + + # Disable retries and run test_list_logs_all_params. + _service.disable_retries() + self.test_list_logs_all_params() + + @responses.activate + def test_list_logs_required_params(self): + """ + test_list_logs_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/logs') + mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": 
"range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Invoke method + response = _service.list_logs( + assistant_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_logs_required_params_with_retries(self): + # Enable retries and run test_list_logs_required_params. + _service.enable_retries() + self.test_list_logs_required_params() + + # Disable retries and run test_list_logs_required_params. 
+ _service.disable_retries() + self.test_list_logs_required_params() + + @responses.activate + def test_list_logs_value_error(self): + """ + test_list_logs_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/logs') + mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, 
"specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_logs(**req_copy) + + def test_list_logs_value_error_with_retries(self): + # Enable retries and run test_list_logs_value_error. + _service.enable_retries() + self.test_list_logs_value_error() + + # Disable retries and run test_list_logs_value_error. 
+ _service.disable_retries() + self.test_list_logs_value_error() + + +# endregion +############################################################################## +# End of Service: Logs +############################################################################## + +############################################################################## +# Start of Service: UserData +############################################################################## +# region + + +class TestDeleteUserData: + """ + Test Class for delete_user_data + """ + + @responses.activate + def test_delete_user_data_all_params(self): + """ + delete_user_data() + """ + # Set up mock + url = preprocess_url('/v2/user_data') + responses.add( + responses.DELETE, + url, + status=202, + ) + + # Set up parameter values + customer_id = 'testString' + + # Invoke method + response = _service.delete_user_data( + customer_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'customer_id={}'.format(customer_id) in query_string + + def test_delete_user_data_all_params_with_retries(self): + # Enable retries and run test_delete_user_data_all_params. + _service.enable_retries() + self.test_delete_user_data_all_params() + + # Disable retries and run test_delete_user_data_all_params. + _service.disable_retries() + self.test_delete_user_data_all_params() + + @responses.activate + def test_delete_user_data_value_error(self): + """ + test_delete_user_data_value_error() + """ + # Set up mock + url = preprocess_url('/v2/user_data') + responses.add( + responses.DELETE, + url, + status=202, + ) + + # Set up parameter values + customer_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customer_id": customer_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_user_data(**req_copy) + + def test_delete_user_data_value_error_with_retries(self): + # Enable retries and run test_delete_user_data_value_error. + _service.enable_retries() + self.test_delete_user_data_value_error() + + # Disable retries and run test_delete_user_data_value_error. 
+ _service.disable_retries() + self.test_delete_user_data_value_error() + + +# endregion +############################################################################## +# End of Service: UserData +############################################################################## + +############################################################################## +# Start of Service: Environments +############################################################################## +# region + + +class TestListEnvironments: + """ + Test Class for list_environments + """ + + @responses.activate + def test_list_environments_all_params(self): + """ + list_environments() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments') + mock_response = '{"environments": [{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + page_limit = 100 + include_count = False + sort = 'name' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_environments( + assistant_id, + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_environments_all_params_with_retries(self): + # Enable retries and run test_list_environments_all_params. + _service.enable_retries() + self.test_list_environments_all_params() + + # Disable retries and run test_list_environments_all_params. 
+ _service.disable_retries() + self.test_list_environments_all_params() + + @responses.activate + def test_list_environments_required_params(self): + """ + test_list_environments_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments') + mock_response = '{"environments": [{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Invoke method + response = _service.list_environments( + assistant_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_environments_required_params_with_retries(self): + # Enable retries and run test_list_environments_required_params. + _service.enable_retries() + self.test_list_environments_required_params() + + # Disable retries and run test_list_environments_required_params. + _service.disable_retries() + self.test_list_environments_required_params() + + @responses.activate + def test_list_environments_value_error(self): + """ + test_list_environments_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments') + mock_response = '{"environments": [{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_environments(**req_copy) + + def test_list_environments_value_error_with_retries(self): + # Enable retries and run test_list_environments_value_error. + _service.enable_retries() + self.test_list_environments_value_error() + + # Disable retries and run test_list_environments_value_error. 
+ _service.disable_retries() + self.test_list_environments_value_error() + + +class TestGetEnvironment: + """ + Test Class for get_environment + """ + + @responses.activate + def test_get_environment_all_params(self): + """ + get_environment() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString') + mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + include_audit = False + + # Invoke method + response = _service.get_environment( + assistant_id, + environment_id, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_get_environment_all_params_with_retries(self): + # Enable retries and run test_get_environment_all_params. + _service.enable_retries() + self.test_get_environment_all_params() + + # Disable retries and run test_get_environment_all_params. + _service.disable_retries() + self.test_get_environment_all_params() + + @responses.activate + def test_get_environment_required_params(self): + """ + test_get_environment_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString') + mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + + # Invoke method + response = _service.get_environment( + assistant_id, + environment_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_environment_required_params_with_retries(self): + # Enable retries and run test_get_environment_required_params. + _service.enable_retries() + self.test_get_environment_required_params() + + # Disable retries and run test_get_environment_required_params. 
+ _service.disable_retries() + self.test_get_environment_required_params() + + @responses.activate + def test_get_environment_value_error(self): + """ + test_get_environment_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString') + mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "environment_id": environment_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_environment(**req_copy) + + def test_get_environment_value_error_with_retries(self): + # Enable retries and run test_get_environment_value_error. + _service.enable_retries() + self.test_get_environment_value_error() + + # Disable retries and run test_get_environment_value_error. + _service.disable_retries() + self.test_get_environment_value_error() + + +class TestUpdateEnvironment: + """ + Test Class for update_environment + """ + + @responses.activate + def test_update_environment_all_params(self): + """ + update_environment() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString') + mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a UpdateEnvironmentOrchestration model + update_environment_orchestration_model = {} + update_environment_orchestration_model['search_skill_fallback'] = True + + # Construct a dict representation of a EnvironmentSkill model + environment_skill_model = {} + environment_skill_model['skill_id'] = 'testString' + environment_skill_model['type'] = 'dialog' + environment_skill_model['disabled'] = True + environment_skill_model['snapshot'] = 'testString' + environment_skill_model['skill_reference'] = 'testString' + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + name = 'testString' + description = 'testString' + orchestration = update_environment_orchestration_model + session_timeout = 10 + 
skill_references = [environment_skill_model] + + # Invoke method + response = _service.update_environment( + assistant_id, + environment_id, + name=name, + description=description, + orchestration=orchestration, + session_timeout=session_timeout, + skill_references=skill_references, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['orchestration'] == update_environment_orchestration_model + assert req_body['session_timeout'] == 10 + assert req_body['skill_references'] == [environment_skill_model] + + def test_update_environment_all_params_with_retries(self): + # Enable retries and run test_update_environment_all_params. + _service.enable_retries() + self.test_update_environment_all_params() + + # Disable retries and run test_update_environment_all_params. + _service.disable_retries() + self.test_update_environment_all_params() + + @responses.activate + def test_update_environment_required_params(self): + """ + test_update_environment_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString') + mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + + # Invoke method + response = _service.update_environment( + assistant_id, + environment_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_update_environment_required_params_with_retries(self): + # Enable retries and run test_update_environment_required_params. + _service.enable_retries() + self.test_update_environment_required_params() + + # Disable retries and run test_update_environment_required_params. 
+ _service.disable_retries() + self.test_update_environment_required_params() + + @responses.activate + def test_update_environment_value_error(self): + """ + test_update_environment_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/environments/testString') + mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + environment_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "environment_id": environment_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_environment(**req_copy) + + def test_update_environment_value_error_with_retries(self): + # Enable retries and run test_update_environment_value_error. + _service.enable_retries() + self.test_update_environment_value_error() + + # Disable retries and run test_update_environment_value_error. + _service.disable_retries() + self.test_update_environment_value_error() + + +# endregion +############################################################################## +# End of Service: Environments +############################################################################## + +############################################################################## +# Start of Service: Releases +############################################################################## +# region + + +class TestCreateRelease: + """ + Test Class for create_release + """ + + @responses.activate + def test_create_release_all_params(self): + """ + create_release() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases') + mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + assistant_id = 'testString' + description = 'testString' + + # Invoke method + response = _service.create_release( + assistant_id, + description=description, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['description'] == 'testString' + + def test_create_release_all_params_with_retries(self): + # Enable retries and run test_create_release_all_params. 
+ _service.enable_retries() + self.test_create_release_all_params() + + # Disable retries and run test_create_release_all_params. + _service.disable_retries() + self.test_create_release_all_params() + + @responses.activate + def test_create_release_required_params(self): + """ + test_create_release_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases') + mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Invoke method + response = _service.create_release( + assistant_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + + def test_create_release_required_params_with_retries(self): + # Enable retries and run test_create_release_required_params. + _service.enable_retries() + self.test_create_release_required_params() + + # Disable retries and run test_create_release_required_params. + _service.disable_retries() + self.test_create_release_required_params() + + @responses.activate + def test_create_release_value_error(self): + """ + test_create_release_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases') + mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_release(**req_copy) + + def test_create_release_value_error_with_retries(self): + # Enable retries and run test_create_release_value_error. + _service.enable_retries() + self.test_create_release_value_error() + + # Disable retries and run test_create_release_value_error. 
+ _service.disable_retries() + self.test_create_release_value_error() + + +class TestListReleases: + """ + Test Class for list_releases + """ + + @responses.activate + def test_list_releases_all_params(self): + """ + list_releases() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases') + mock_response = '{"releases": [{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + page_limit = 100 + include_count = False + sort = 'name' + cursor = 'testString' + include_audit = False + + # Invoke method + response = _service.list_releases( + assistant_id, + page_limit=page_limit, + include_count=include_count, + sort=sort, + cursor=cursor, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'page_limit={}'.format(page_limit) in query_string + assert 'include_count={}'.format('true' if include_count else 'false') in query_string + assert 'sort={}'.format(sort) in query_string + assert 'cursor={}'.format(cursor) in query_string + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_list_releases_all_params_with_retries(self): + # Enable retries and run test_list_releases_all_params. + _service.enable_retries() + self.test_list_releases_all_params() + + # Disable retries and run test_list_releases_all_params. + _service.disable_retries() + self.test_list_releases_all_params() + + @responses.activate + def test_list_releases_required_params(self): + """ + test_list_releases_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases') + mock_response = '{"releases": [{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Invoke method + response = _service.list_releases( + assistant_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_releases_required_params_with_retries(self): + # Enable retries and run test_list_releases_required_params. 
+ _service.enable_retries() + self.test_list_releases_required_params() + + # Disable retries and run test_list_releases_required_params. + _service.disable_retries() + self.test_list_releases_required_params() + + @responses.activate + def test_list_releases_value_error(self): + """ + test_list_releases_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases') + mock_response = '{"releases": [{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_releases(**req_copy) + + def test_list_releases_value_error_with_retries(self): + # Enable retries and run test_list_releases_value_error. + _service.enable_retries() + self.test_list_releases_value_error() + + # Disable retries and run test_list_releases_value_error. + _service.disable_retries() + self.test_list_releases_value_error() + + +class TestGetRelease: + """ + Test Class for get_release + """ + + @responses.activate + def test_get_release_all_params(self): + """ + get_release() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString') + mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + include_audit = False + + # Invoke method + response = _service.get_release( + assistant_id, + release, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_get_release_all_params_with_retries(self): + # Enable retries and run test_get_release_all_params. + _service.enable_retries() + self.test_get_release_all_params() + + # Disable retries and run test_get_release_all_params. 
+ _service.disable_retries() + self.test_get_release_all_params() + + @responses.activate + def test_get_release_required_params(self): + """ + test_get_release_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString') + mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + + # Invoke method + response = _service.get_release( + assistant_id, + release, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_release_required_params_with_retries(self): + # Enable retries and run test_get_release_required_params. + _service.enable_retries() + self.test_get_release_required_params() + + # Disable retries and run test_get_release_required_params. + _service.disable_retries() + self.test_get_release_required_params() + + @responses.activate + def test_get_release_value_error(self): + """ + test_get_release_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString') + mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "release": release, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_release(**req_copy) + + def test_get_release_value_error_with_retries(self): + # Enable retries and run test_get_release_value_error. + _service.enable_retries() + self.test_get_release_value_error() + + # Disable retries and run test_get_release_value_error. + _service.disable_retries() + self.test_get_release_value_error() + + +class TestDeleteRelease: + """ + Test Class for delete_release + """ + + @responses.activate + def test_delete_release_all_params(self): + """ + delete_release() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + + # Invoke method + response = _service.delete_release( + assistant_id, + release, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_release_all_params_with_retries(self): + # Enable retries and run test_delete_release_all_params. 
+ _service.enable_retries() + self.test_delete_release_all_params() + + # Disable retries and run test_delete_release_all_params. + _service.disable_retries() + self.test_delete_release_all_params() + + @responses.activate + def test_delete_release_value_error(self): + """ + test_delete_release_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "release": release, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_release(**req_copy) + + def test_delete_release_value_error_with_retries(self): + # Enable retries and run test_delete_release_value_error. + _service.enable_retries() + self.test_delete_release_value_error() + + # Disable retries and run test_delete_release_value_error. + _service.disable_retries() + self.test_delete_release_value_error() + + +class TestDeployRelease: + """ + Test Class for deploy_release + """ + + @responses.activate + def test_deploy_release_all_params(self): + """ + deploy_release() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString/deploy') + mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + environment_id = 'testString' + include_audit = False + + # Invoke method + response = _service.deploy_release( + assistant_id, + release, + environment_id, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['environment_id'] == 'testString' + + def test_deploy_release_all_params_with_retries(self): + # Enable retries and run test_deploy_release_all_params. + _service.enable_retries() + self.test_deploy_release_all_params() + + # Disable retries and run test_deploy_release_all_params. 
+ _service.disable_retries() + self.test_deploy_release_all_params() + + @responses.activate + def test_deploy_release_required_params(self): + """ + test_deploy_release_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString/deploy') + mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + environment_id = 'testString' + + # Invoke method + response = _service.deploy_release( + assistant_id, + release, + environment_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['environment_id'] == 'testString' + + def test_deploy_release_required_params_with_retries(self): + # Enable retries and run test_deploy_release_required_params. + _service.enable_retries() + self.test_deploy_release_required_params() + + # Disable retries and run test_deploy_release_required_params. + _service.disable_retries() + self.test_deploy_release_required_params() + + @responses.activate + def test_deploy_release_value_error(self): + """ + test_deploy_release_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString/deploy') + mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + environment_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "release": release, + "environment_id": environment_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.deploy_release(**req_copy) + + def test_deploy_release_value_error_with_retries(self): + # Enable retries and run test_deploy_release_value_error. + _service.enable_retries() + self.test_deploy_release_value_error() + + # Disable retries and run test_deploy_release_value_error. 
+ _service.disable_retries() + self.test_deploy_release_value_error() + + +class TestCreateReleaseExport: + """ + Test Class for create_release_export + """ + + @responses.activate + def test_create_release_export_all_params(self): + """ + create_release_export() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString/export') + mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + include_audit = False + + # Invoke method + response = _service.create_release_export( + assistant_id, + release, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_create_release_export_all_params_with_retries(self): + # Enable retries and run test_create_release_export_all_params. + _service.enable_retries() + self.test_create_release_export_all_params() + + # Disable retries and run test_create_release_export_all_params. + _service.disable_retries() + self.test_create_release_export_all_params() + + @responses.activate + def test_create_release_export_required_params(self): + """ + test_create_release_export_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString/export') + mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + + # Invoke method + response = _service.create_release_export( + assistant_id, + release, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_create_release_export_required_params_with_retries(self): + # Enable retries and run test_create_release_export_required_params. + _service.enable_retries() + self.test_create_release_export_required_params() + + # Disable retries and run test_create_release_export_required_params. 
+ _service.disable_retries() + self.test_create_release_export_required_params() + + @responses.activate + def test_create_release_export_value_error(self): + """ + test_create_release_export_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString/export') + mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "release": release, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_release_export(**req_copy) + + def test_create_release_export_value_error_with_retries(self): + # Enable retries and run test_create_release_export_value_error. + _service.enable_retries() + self.test_create_release_export_value_error() + + # Disable retries and run test_create_release_export_value_error. + _service.disable_retries() + self.test_create_release_export_value_error() + + +class TestDownloadReleaseExport: + """ + Test Class for download_release_export + """ + + @responses.activate + def test_download_release_export_all_params(self): + """ + download_release_export() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString/export') + mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + accept = 'application/json' + include_audit = False + + # Invoke method + response = _service.download_release_export( + assistant_id, + release, + accept=accept, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_download_release_export_all_params_with_retries(self): + # Enable retries and run test_download_release_export_all_params. + _service.enable_retries() + self.test_download_release_export_all_params() + + # Disable retries and run test_download_release_export_all_params. 
+ _service.disable_retries() + self.test_download_release_export_all_params() + + @responses.activate + def test_download_release_export_required_params(self): + """ + test_download_release_export_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString/export') + mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + + # Invoke method + response = _service.download_release_export( + assistant_id, + release, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_download_release_export_required_params_with_retries(self): + # Enable retries and run test_download_release_export_required_params. + _service.enable_retries() + self.test_download_release_export_required_params() + + # Disable retries and run test_download_release_export_required_params. + _service.disable_retries() + self.test_download_release_export_required_params() + + @responses.activate + def test_download_release_export_value_error(self): + """ + test_download_release_export_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/releases/testString/export') + mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + release = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "release": release, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.download_release_export(**req_copy) + + def test_download_release_export_value_error_with_retries(self): + # Enable retries and run test_download_release_export_value_error. + _service.enable_retries() + self.test_download_release_export_value_error() + + # Disable retries and run test_download_release_export_value_error. 
+ _service.disable_retries() + self.test_download_release_export_value_error() + + +class TestCreateReleaseImport: + """ + Test Class for create_release_import + """ + + @responses.activate + def test_create_release_import_all_params(self): + """ + create_release_import() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/import') + mock_response = '{"status": "Failed", "task_id": "task_id", "assistant_id": "assistant_id", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + assistant_id = 'testString' + body = io.BytesIO(b'This is a mock file.').getvalue() + include_audit = False + + # Invoke method + response = _service.create_release_import( + assistant_id, + body, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + assert responses.calls[0].request.body == body + + def test_create_release_import_all_params_with_retries(self): + # Enable retries and run test_create_release_import_all_params. + _service.enable_retries() + self.test_create_release_import_all_params() + + # Disable retries and run test_create_release_import_all_params. + _service.disable_retries() + self.test_create_release_import_all_params() + + @responses.activate + def test_create_release_import_required_params(self): + """ + test_create_release_import_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/import') + mock_response = '{"status": "Failed", "task_id": "task_id", "assistant_id": "assistant_id", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + assistant_id = 'testString' + body = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.create_release_import( + assistant_id, + body, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + # Validate body params + assert responses.calls[0].request.body == body + + def test_create_release_import_required_params_with_retries(self): + # Enable retries and run test_create_release_import_required_params. + _service.enable_retries() + self.test_create_release_import_required_params() + + # Disable retries and run test_create_release_import_required_params. 
+ _service.disable_retries() + self.test_create_release_import_required_params() + + @responses.activate + def test_create_release_import_value_error(self): + """ + test_create_release_import_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/import') + mock_response = '{"status": "Failed", "task_id": "task_id", "assistant_id": "assistant_id", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + assistant_id = 'testString' + body = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "body": body, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_release_import(**req_copy) + + def test_create_release_import_value_error_with_retries(self): + # Enable retries and run test_create_release_import_value_error. + _service.enable_retries() + self.test_create_release_import_value_error() + + # Disable retries and run test_create_release_import_value_error. + _service.disable_retries() + self.test_create_release_import_value_error() + + +class TestGetReleaseImportStatus: + """ + Test Class for get_release_import_status + """ + + @responses.activate + def test_get_release_import_status_all_params(self): + """ + get_release_import_status() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/import') + mock_response = '{"status": "Completed", "task_id": "task_id", "assistant_id": "assistant_id", "status_errors": [{"message": "message"}], "status_description": "status_description", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + include_audit = False + + # Invoke method + response = _service.get_release_import_status( + assistant_id, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_get_release_import_status_all_params_with_retries(self): + # Enable retries and run test_get_release_import_status_all_params. + _service.enable_retries() + self.test_get_release_import_status_all_params() + + # Disable retries and run test_get_release_import_status_all_params. 
+ _service.disable_retries() + self.test_get_release_import_status_all_params() + + @responses.activate + def test_get_release_import_status_required_params(self): + """ + test_get_release_import_status_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/import') + mock_response = '{"status": "Completed", "task_id": "task_id", "assistant_id": "assistant_id", "status_errors": [{"message": "message"}], "status_description": "status_description", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Invoke method + response = _service.get_release_import_status( + assistant_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_release_import_status_required_params_with_retries(self): + # Enable retries and run test_get_release_import_status_required_params. + _service.enable_retries() + self.test_get_release_import_status_required_params() + + # Disable retries and run test_get_release_import_status_required_params. + _service.disable_retries() + self.test_get_release_import_status_required_params() + + @responses.activate + def test_get_release_import_status_value_error(self): + """ + test_get_release_import_status_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/import') + mock_response = '{"status": "Completed", "task_id": "task_id", "assistant_id": "assistant_id", "status_errors": [{"message": "message"}], "status_description": "status_description", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_release_import_status(**req_copy) + + def test_get_release_import_status_value_error_with_retries(self): + # Enable retries and run test_get_release_import_status_value_error. + _service.enable_retries() + self.test_get_release_import_status_value_error() + + # Disable retries and run test_get_release_import_status_value_error. 
+ _service.disable_retries() + self.test_get_release_import_status_value_error() + + +# endregion +############################################################################## +# End of Service: Releases +############################################################################## + +############################################################################## +# Start of Service: Skills +############################################################################## +# region + + +class TestGetSkill: + """ + Test Class for get_skill + """ + + @responses.activate + def test_get_skill_all_params(self): + """ + get_skill() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills/testString') + mock_response = '{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + skill_id = 'testString' + + # Invoke method + response = _service.get_skill( + assistant_id, + skill_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_skill_all_params_with_retries(self): + # Enable retries and run test_get_skill_all_params. + _service.enable_retries() + self.test_get_skill_all_params() + + # Disable retries and run test_get_skill_all_params. 
+ _service.disable_retries() + self.test_get_skill_all_params() + + @responses.activate + def test_get_skill_value_error(self): + """ + test_get_skill_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills/testString') + mock_response = '{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + skill_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "skill_id": skill_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_skill(**req_copy) + + def test_get_skill_value_error_with_retries(self): + # Enable retries and run test_get_skill_value_error. + _service.enable_retries() + self.test_get_skill_value_error() + + # Disable retries and run test_get_skill_value_error. 
+ _service.disable_retries() + self.test_get_skill_value_error() + + +class TestUpdateSkill: + """ + Test Class for update_skill + """ + + @responses.activate + def test_update_skill_all_params(self): + """ + update_skill() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills/testString') + mock_response = '{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Construct a dict representation of a SearchSettingsDiscoveryAuthentication model + search_settings_discovery_authentication_model = {} + search_settings_discovery_authentication_model['basic'] = 'testString' + search_settings_discovery_authentication_model['bearer'] = 'testString' + + # Construct a dict representation of a SearchSettingsDiscovery model + search_settings_discovery_model = {} + search_settings_discovery_model['instance_id'] = 'testString' + search_settings_discovery_model['project_id'] = 'testString' + search_settings_discovery_model['url'] = 'testString' + search_settings_discovery_model['max_primary_results'] = 10000 + search_settings_discovery_model['max_total_results'] = 10000 + search_settings_discovery_model['confidence_threshold'] = 0.0 + search_settings_discovery_model['highlight'] = True + search_settings_discovery_model['find_answers'] = True + search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model + + # Construct a dict representation of a SearchSettingsMessages model + search_settings_messages_model = {} + search_settings_messages_model['success'] = 'testString' + search_settings_messages_model['error'] = 'testString' + search_settings_messages_model['no_result'] = 'testString' + + # Construct a dict representation of a SearchSettingsSchemaMapping model + search_settings_schema_mapping_model = {} + search_settings_schema_mapping_model['url'] = 
'testString' + search_settings_schema_mapping_model['body'] = 'testString' + search_settings_schema_mapping_model['title'] = 'testString' + + # Construct a dict representation of a SearchSettingsElasticSearch model + search_settings_elastic_search_model = {} + search_settings_elastic_search_model['url'] = 'testString' + search_settings_elastic_search_model['port'] = 'testString' + search_settings_elastic_search_model['username'] = 'testString' + search_settings_elastic_search_model['password'] = 'testString' + search_settings_elastic_search_model['index'] = 'testString' + search_settings_elastic_search_model['filter'] = ['testString'] + search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'} + search_settings_elastic_search_model['managed_index'] = 'testString' + search_settings_elastic_search_model['apikey'] = 'testString' + + # Construct a dict representation of a SearchSettingsConversationalSearchResponseLength model + search_settings_conversational_search_response_length_model = {} + search_settings_conversational_search_response_length_model['option'] = 'moderate' + + # Construct a dict representation of a SearchSettingsConversationalSearchSearchConfidence model + search_settings_conversational_search_search_confidence_model = {} + search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often' + + # Construct a dict representation of a SearchSettingsConversationalSearch model + search_settings_conversational_search_model = {} + search_settings_conversational_search_model['enabled'] = True + search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model + search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model + + # Construct a dict representation of a SearchSettingsServerSideSearch model + search_settings_server_side_search_model = {} + search_settings_server_side_search_model['url'] = 'testString' + search_settings_server_side_search_model['port'] = 'testString' + search_settings_server_side_search_model['username'] = 'testString' + search_settings_server_side_search_model['password'] = 'testString' + search_settings_server_side_search_model['filter'] = 'testString' + search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'} + search_settings_server_side_search_model['apikey'] = 'testString' + search_settings_server_side_search_model['no_auth'] = True + search_settings_server_side_search_model['auth_type'] = 'basic' + + # Construct a dict representation of a SearchSettingsClientSideSearch model + search_settings_client_side_search_model = {} + search_settings_client_side_search_model['filter'] = 'testString' + search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'} + + # Construct a dict representation of a SearchSettings model + search_settings_model = {} + search_settings_model['discovery'] = search_settings_discovery_model + search_settings_model['messages'] = search_settings_messages_model + search_settings_model['schema_mapping'] = search_settings_schema_mapping_model + search_settings_model['elastic_search'] = search_settings_elastic_search_model + search_settings_model['conversational_search'] = search_settings_conversational_search_model + search_settings_model['server_side_search'] = search_settings_server_side_search_model + search_settings_model['client_side_search'] = search_settings_client_side_search_model + + # Set up parameter values + assistant_id 
= 'testString' + skill_id = 'testString' + name = 'testString' + description = 'testString' + workspace = {'anyKey': 'anyValue'} + dialog_settings = {'anyKey': 'anyValue'} + search_settings = search_settings_model + + # Invoke method + response = _service.update_skill( + assistant_id, + skill_id, + name=name, + description=description, + workspace=workspace, + dialog_settings=dialog_settings, + search_settings=search_settings, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['workspace'] == {'anyKey': 'anyValue'} + assert req_body['dialog_settings'] == {'anyKey': 'anyValue'} + assert req_body['search_settings'] == search_settings_model + + def test_update_skill_all_params_with_retries(self): + # Enable retries and run test_update_skill_all_params. + _service.enable_retries() + self.test_update_skill_all_params() + + # Disable retries and run test_update_skill_all_params. + _service.disable_retries() + self.test_update_skill_all_params() + + @responses.activate + def test_update_skill_value_error(self): + """ + test_update_skill_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills/testString') + mock_response = '{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Construct a dict representation of a SearchSettingsDiscoveryAuthentication model + search_settings_discovery_authentication_model = {} + search_settings_discovery_authentication_model['basic'] = 'testString' + search_settings_discovery_authentication_model['bearer'] = 'testString' + + # Construct a dict representation of a 
SearchSettingsDiscovery model + search_settings_discovery_model = {} + search_settings_discovery_model['instance_id'] = 'testString' + search_settings_discovery_model['project_id'] = 'testString' + search_settings_discovery_model['url'] = 'testString' + search_settings_discovery_model['max_primary_results'] = 10000 + search_settings_discovery_model['max_total_results'] = 10000 + search_settings_discovery_model['confidence_threshold'] = 0.0 + search_settings_discovery_model['highlight'] = True + search_settings_discovery_model['find_answers'] = True + search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model + + # Construct a dict representation of a SearchSettingsMessages model + search_settings_messages_model = {} + search_settings_messages_model['success'] = 'testString' + search_settings_messages_model['error'] = 'testString' + search_settings_messages_model['no_result'] = 'testString' + + # Construct a dict representation of a SearchSettingsSchemaMapping model + search_settings_schema_mapping_model = {} + search_settings_schema_mapping_model['url'] = 'testString' + search_settings_schema_mapping_model['body'] = 'testString' + search_settings_schema_mapping_model['title'] = 'testString' + + # Construct a dict representation of a SearchSettingsElasticSearch model + search_settings_elastic_search_model = {} + search_settings_elastic_search_model['url'] = 'testString' + search_settings_elastic_search_model['port'] = 'testString' + search_settings_elastic_search_model['username'] = 'testString' + search_settings_elastic_search_model['password'] = 'testString' + search_settings_elastic_search_model['index'] = 'testString' + search_settings_elastic_search_model['filter'] = ['testString'] + search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'} + search_settings_elastic_search_model['managed_index'] = 'testString' + search_settings_elastic_search_model['apikey'] = 'testString' + + # Construct a dict representation of a SearchSettingsConversationalSearchResponseLength model + search_settings_conversational_search_response_length_model = {} + search_settings_conversational_search_response_length_model['option'] = 'moderate' + + # Construct a dict representation of a SearchSettingsConversationalSearchSearchConfidence model + search_settings_conversational_search_search_confidence_model = {} + search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often' + + # Construct a dict representation of a SearchSettingsConversationalSearch model + search_settings_conversational_search_model = {} + search_settings_conversational_search_model['enabled'] = True + search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model + search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model + + # Construct a dict representation of a SearchSettingsServerSideSearch model + search_settings_server_side_search_model = {} + search_settings_server_side_search_model['url'] = 'testString' + search_settings_server_side_search_model['port'] = 'testString' + search_settings_server_side_search_model['username'] = 'testString' + search_settings_server_side_search_model['password'] = 'testString' + search_settings_server_side_search_model['filter'] = 'testString' + search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'} + search_settings_server_side_search_model['apikey'] = 'testString' + 
search_settings_server_side_search_model['no_auth'] = True + search_settings_server_side_search_model['auth_type'] = 'basic' + + # Construct a dict representation of a SearchSettingsClientSideSearch model + search_settings_client_side_search_model = {} + search_settings_client_side_search_model['filter'] = 'testString' + search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'} + + # Construct a dict representation of a SearchSettings model + search_settings_model = {} + search_settings_model['discovery'] = search_settings_discovery_model + search_settings_model['messages'] = search_settings_messages_model + search_settings_model['schema_mapping'] = search_settings_schema_mapping_model + search_settings_model['elastic_search'] = search_settings_elastic_search_model + search_settings_model['conversational_search'] = search_settings_conversational_search_model + search_settings_model['server_side_search'] = search_settings_server_side_search_model + search_settings_model['client_side_search'] = search_settings_client_side_search_model + + # Set up parameter values + assistant_id = 'testString' + skill_id = 'testString' + name = 'testString' + description = 'testString' + workspace = {'anyKey': 'anyValue'} + dialog_settings = {'anyKey': 'anyValue'} + search_settings = search_settings_model + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "skill_id": skill_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_skill(**req_copy) + + def test_update_skill_value_error_with_retries(self): + # Enable retries and run test_update_skill_value_error. + _service.enable_retries() + self.test_update_skill_value_error() + + # Disable retries and run test_update_skill_value_error. 
+ _service.disable_retries() + self.test_update_skill_value_error() + + +class TestExportSkills: + """ + Test Class for export_skills + """ + + @responses.activate + def test_export_skills_all_params(self): + """ + export_skills() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills_export') + mock_response = '{"assistant_skills": [{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}], "assistant_state": {"action_disabled": false, "dialog_disabled": false}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + include_audit = False + + # Invoke method + response = _service.export_skills( + assistant_id, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + + def test_export_skills_all_params_with_retries(self): + # Enable retries and run test_export_skills_all_params. + _service.enable_retries() + self.test_export_skills_all_params() + + # Disable retries and run test_export_skills_all_params. 
+ _service.disable_retries() + self.test_export_skills_all_params() + + @responses.activate + def test_export_skills_required_params(self): + """ + test_export_skills_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills_export') + mock_response = '{"assistant_skills": [{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}], "assistant_state": {"action_disabled": false, "dialog_disabled": false}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Invoke method + response = _service.export_skills( + assistant_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_export_skills_required_params_with_retries(self): + # Enable retries and run test_export_skills_required_params. + _service.enable_retries() + self.test_export_skills_required_params() + + # Disable retries and run test_export_skills_required_params. 
+ _service.disable_retries() + self.test_export_skills_required_params() + + @responses.activate + def test_export_skills_value_error(self): + """ + test_export_skills_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills_export') + mock_response = '{"assistant_skills": [{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}], "assistant_state": {"action_disabled": false, "dialog_disabled": false}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.export_skills(**req_copy) + + def test_export_skills_value_error_with_retries(self): + # Enable retries and run test_export_skills_value_error. + _service.enable_retries() + self.test_export_skills_value_error() + + # Disable retries and run test_export_skills_value_error. 
+ _service.disable_retries() + self.test_export_skills_value_error() + + +class TestImportSkills: + """ + Test Class for import_skills + """ + + @responses.activate + def test_import_skills_all_params(self): + """ + import_skills() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills_import') + mock_response = '{"assistant_id": "assistant_id", "status": "Available", "status_description": "status_description", "status_errors": [{"message": "message"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Construct a dict representation of a SearchSettingsDiscoveryAuthentication model + search_settings_discovery_authentication_model = {} + search_settings_discovery_authentication_model['basic'] = 'testString' + search_settings_discovery_authentication_model['bearer'] = 'testString' + + # Construct a dict representation of a SearchSettingsDiscovery model + search_settings_discovery_model = {} + search_settings_discovery_model['instance_id'] = 'testString' + search_settings_discovery_model['project_id'] = 'testString' + search_settings_discovery_model['url'] = 'testString' + search_settings_discovery_model['max_primary_results'] = 10000 + search_settings_discovery_model['max_total_results'] = 10000 + search_settings_discovery_model['confidence_threshold'] = 0.0 + search_settings_discovery_model['highlight'] = True + search_settings_discovery_model['find_answers'] = True + search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model + + # Construct a dict representation of a SearchSettingsMessages model + search_settings_messages_model = {} + search_settings_messages_model['success'] = 'testString' + search_settings_messages_model['error'] = 'testString' + search_settings_messages_model['no_result'] = 'testString' + + # Construct a dict representation of a SearchSettingsSchemaMapping model + search_settings_schema_mapping_model = {} + search_settings_schema_mapping_model['url'] = 'testString' + search_settings_schema_mapping_model['body'] = 'testString' + search_settings_schema_mapping_model['title'] = 'testString' + + # Construct a dict representation of a SearchSettingsElasticSearch model + search_settings_elastic_search_model = {} + search_settings_elastic_search_model['url'] = 'testString' + search_settings_elastic_search_model['port'] = 'testString' + search_settings_elastic_search_model['username'] = 'testString' + search_settings_elastic_search_model['password'] = 'testString' + search_settings_elastic_search_model['index'] = 'testString' + search_settings_elastic_search_model['filter'] = ['testString'] + search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'} + search_settings_elastic_search_model['managed_index'] = 'testString' + search_settings_elastic_search_model['apikey'] = 'testString' + + # Construct a dict representation of a SearchSettingsConversationalSearchResponseLength model + search_settings_conversational_search_response_length_model = {} + search_settings_conversational_search_response_length_model['option'] = 'moderate' + + # Construct a dict representation of a SearchSettingsConversationalSearchSearchConfidence model + search_settings_conversational_search_search_confidence_model = {} + search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often' + + # Construct a dict representation of a SearchSettingsConversationalSearch model + search_settings_conversational_search_model = {} + 
search_settings_conversational_search_model['enabled'] = True + search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model + search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model + + # Construct a dict representation of a SearchSettingsServerSideSearch model + search_settings_server_side_search_model = {} + search_settings_server_side_search_model['url'] = 'testString' + search_settings_server_side_search_model['port'] = 'testString' + search_settings_server_side_search_model['username'] = 'testString' + search_settings_server_side_search_model['password'] = 'testString' + search_settings_server_side_search_model['filter'] = 'testString' + search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'} + search_settings_server_side_search_model['apikey'] = 'testString' + search_settings_server_side_search_model['no_auth'] = True + search_settings_server_side_search_model['auth_type'] = 'basic' + + # Construct a dict representation of a SearchSettingsClientSideSearch model + search_settings_client_side_search_model = {} + search_settings_client_side_search_model['filter'] = 'testString' + search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'} + + # Construct a dict representation of a SearchSettings model + search_settings_model = {} + search_settings_model['discovery'] = search_settings_discovery_model + search_settings_model['messages'] = search_settings_messages_model + search_settings_model['schema_mapping'] = search_settings_schema_mapping_model + search_settings_model['elastic_search'] = search_settings_elastic_search_model + search_settings_model['conversational_search'] = search_settings_conversational_search_model + search_settings_model['server_side_search'] = search_settings_server_side_search_model + search_settings_model['client_side_search'] = search_settings_client_side_search_model + + # Construct a dict representation of a SkillImport model + skill_import_model = {} + skill_import_model['name'] = 'testString' + skill_import_model['description'] = 'testString' + skill_import_model['workspace'] = {'anyKey': 'anyValue'} + skill_import_model['dialog_settings'] = {'anyKey': 'anyValue'} + skill_import_model['search_settings'] = search_settings_model + skill_import_model['language'] = 'testString' + skill_import_model['type'] = 'action' + + # Construct a dict representation of a AssistantState model + assistant_state_model = {} + assistant_state_model['action_disabled'] = True + assistant_state_model['dialog_disabled'] = True + + # Set up parameter values + assistant_id = 'testString' + assistant_skills = [skill_import_model] + assistant_state = assistant_state_model + include_audit = False + + # Invoke method + response = _service.import_skills( + assistant_id, + assistant_skills, + assistant_state, + include_audit=include_audit, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['assistant_skills'] == [skill_import_model] + assert req_body['assistant_state'] == assistant_state_model + + 
def test_import_skills_all_params_with_retries(self): + # Enable retries and run test_import_skills_all_params. + _service.enable_retries() + self.test_import_skills_all_params() + + # Disable retries and run test_import_skills_all_params. + _service.disable_retries() + self.test_import_skills_all_params() + + @responses.activate + def test_import_skills_required_params(self): + """ + test_import_skills_required_params() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills_import') + mock_response = '{"assistant_id": "assistant_id", "status": "Available", "status_description": "status_description", "status_errors": [{"message": "message"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Construct a dict representation of a SearchSettingsDiscoveryAuthentication model + search_settings_discovery_authentication_model = {} + search_settings_discovery_authentication_model['basic'] = 'testString' + search_settings_discovery_authentication_model['bearer'] = 'testString' + + # Construct a dict representation of a SearchSettingsDiscovery model + search_settings_discovery_model = {} + search_settings_discovery_model['instance_id'] = 'testString' + search_settings_discovery_model['project_id'] = 'testString' + search_settings_discovery_model['url'] = 'testString' + search_settings_discovery_model['max_primary_results'] = 10000 + search_settings_discovery_model['max_total_results'] = 10000 + search_settings_discovery_model['confidence_threshold'] = 0.0 + search_settings_discovery_model['highlight'] = True + search_settings_discovery_model['find_answers'] = True + search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model + + # Construct a dict representation of a SearchSettingsMessages model + search_settings_messages_model = {} + search_settings_messages_model['success'] = 'testString' + search_settings_messages_model['error'] = 'testString' + search_settings_messages_model['no_result'] = 'testString' + + # Construct a dict representation of a SearchSettingsSchemaMapping model + search_settings_schema_mapping_model = {} + search_settings_schema_mapping_model['url'] = 'testString' + search_settings_schema_mapping_model['body'] = 'testString' + search_settings_schema_mapping_model['title'] = 'testString' + + # Construct a dict representation of a SearchSettingsElasticSearch model + search_settings_elastic_search_model = {} + search_settings_elastic_search_model['url'] = 'testString' + search_settings_elastic_search_model['port'] = 'testString' + search_settings_elastic_search_model['username'] = 'testString' + search_settings_elastic_search_model['password'] = 'testString' + search_settings_elastic_search_model['index'] = 'testString' + search_settings_elastic_search_model['filter'] = ['testString'] + search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'} + search_settings_elastic_search_model['managed_index'] = 'testString' + search_settings_elastic_search_model['apikey'] = 'testString' + + # Construct a dict representation of a SearchSettingsConversationalSearchResponseLength model + search_settings_conversational_search_response_length_model = {} + search_settings_conversational_search_response_length_model['option'] = 'moderate' + + # Construct a dict representation of a SearchSettingsConversationalSearchSearchConfidence model + search_settings_conversational_search_search_confidence_model = {} + 
search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often' + + # Construct a dict representation of a SearchSettingsConversationalSearch model + search_settings_conversational_search_model = {} + search_settings_conversational_search_model['enabled'] = True + search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model + search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model + + # Construct a dict representation of a SearchSettingsServerSideSearch model + search_settings_server_side_search_model = {} + search_settings_server_side_search_model['url'] = 'testString' + search_settings_server_side_search_model['port'] = 'testString' + search_settings_server_side_search_model['username'] = 'testString' + search_settings_server_side_search_model['password'] = 'testString' + search_settings_server_side_search_model['filter'] = 'testString' + search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'} + search_settings_server_side_search_model['apikey'] = 'testString' + search_settings_server_side_search_model['no_auth'] = True + search_settings_server_side_search_model['auth_type'] = 'basic' + + # Construct a dict representation of a SearchSettingsClientSideSearch model + search_settings_client_side_search_model = {} + search_settings_client_side_search_model['filter'] = 'testString' + search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'} + + # Construct a dict representation of a SearchSettings model + search_settings_model = {} + search_settings_model['discovery'] = search_settings_discovery_model + search_settings_model['messages'] = search_settings_messages_model + search_settings_model['schema_mapping'] = search_settings_schema_mapping_model + search_settings_model['elastic_search'] = search_settings_elastic_search_model + search_settings_model['conversational_search'] = search_settings_conversational_search_model + search_settings_model['server_side_search'] = search_settings_server_side_search_model + search_settings_model['client_side_search'] = search_settings_client_side_search_model + + # Construct a dict representation of a SkillImport model + skill_import_model = {} + skill_import_model['name'] = 'testString' + skill_import_model['description'] = 'testString' + skill_import_model['workspace'] = {'anyKey': 'anyValue'} + skill_import_model['dialog_settings'] = {'anyKey': 'anyValue'} + skill_import_model['search_settings'] = search_settings_model + skill_import_model['language'] = 'testString' + skill_import_model['type'] = 'action' + + # Construct a dict representation of a AssistantState model + assistant_state_model = {} + assistant_state_model['action_disabled'] = True + assistant_state_model['dialog_disabled'] = True + + # Set up parameter values + assistant_id = 'testString' + assistant_skills = [skill_import_model] + assistant_state = assistant_state_model + + # Invoke method + response = _service.import_skills( + assistant_id, + assistant_skills, + assistant_state, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['assistant_skills'] == [skill_import_model] + assert req_body['assistant_state'] == assistant_state_model + + def test_import_skills_required_params_with_retries(self): + # 
Enable retries and run test_import_skills_required_params. + _service.enable_retries() + self.test_import_skills_required_params() + + # Disable retries and run test_import_skills_required_params. + _service.disable_retries() + self.test_import_skills_required_params() + + @responses.activate + def test_import_skills_value_error(self): + """ + test_import_skills_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills_import') + mock_response = '{"assistant_id": "assistant_id", "status": "Available", "status_description": "status_description", "status_errors": [{"message": "message"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Construct a dict representation of a SearchSettingsDiscoveryAuthentication model + search_settings_discovery_authentication_model = {} + search_settings_discovery_authentication_model['basic'] = 'testString' + search_settings_discovery_authentication_model['bearer'] = 'testString' + + # Construct a dict representation of a SearchSettingsDiscovery model + search_settings_discovery_model = {} + search_settings_discovery_model['instance_id'] = 'testString' + search_settings_discovery_model['project_id'] = 'testString' + search_settings_discovery_model['url'] = 'testString' + search_settings_discovery_model['max_primary_results'] = 10000 + search_settings_discovery_model['max_total_results'] = 10000 + search_settings_discovery_model['confidence_threshold'] = 0.0 + search_settings_discovery_model['highlight'] = True + search_settings_discovery_model['find_answers'] = True + search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model + + # Construct a dict representation of a SearchSettingsMessages model + search_settings_messages_model = {} + search_settings_messages_model['success'] = 'testString' + search_settings_messages_model['error'] = 'testString' + search_settings_messages_model['no_result'] = 'testString' + + # Construct a dict representation of a SearchSettingsSchemaMapping model + search_settings_schema_mapping_model = {} + search_settings_schema_mapping_model['url'] = 'testString' + search_settings_schema_mapping_model['body'] = 'testString' + search_settings_schema_mapping_model['title'] = 'testString' + + # Construct a dict representation of a SearchSettingsElasticSearch model + search_settings_elastic_search_model = {} + search_settings_elastic_search_model['url'] = 'testString' + search_settings_elastic_search_model['port'] = 'testString' + search_settings_elastic_search_model['username'] = 'testString' + search_settings_elastic_search_model['password'] = 'testString' + search_settings_elastic_search_model['index'] = 'testString' + search_settings_elastic_search_model['filter'] = ['testString'] + search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'} + search_settings_elastic_search_model['managed_index'] = 'testString' + search_settings_elastic_search_model['apikey'] = 'testString' + + # Construct a dict representation of a SearchSettingsConversationalSearchResponseLength model + search_settings_conversational_search_response_length_model = {} + search_settings_conversational_search_response_length_model['option'] = 'moderate' + + # Construct a dict representation of a SearchSettingsConversationalSearchSearchConfidence model + search_settings_conversational_search_search_confidence_model = {} + search_settings_conversational_search_search_confidence_model['threshold'] = 
'less_often' + + # Construct a dict representation of a SearchSettingsConversationalSearch model + search_settings_conversational_search_model = {} + search_settings_conversational_search_model['enabled'] = True + search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model + search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model + + # Construct a dict representation of a SearchSettingsServerSideSearch model + search_settings_server_side_search_model = {} + search_settings_server_side_search_model['url'] = 'testString' + search_settings_server_side_search_model['port'] = 'testString' + search_settings_server_side_search_model['username'] = 'testString' + search_settings_server_side_search_model['password'] = 'testString' + search_settings_server_side_search_model['filter'] = 'testString' + search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'} + search_settings_server_side_search_model['apikey'] = 'testString' + search_settings_server_side_search_model['no_auth'] = True + search_settings_server_side_search_model['auth_type'] = 'basic' + + # Construct a dict representation of a SearchSettingsClientSideSearch model + search_settings_client_side_search_model = {} + search_settings_client_side_search_model['filter'] = 'testString' + search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'} + + # Construct a dict representation of a SearchSettings model + search_settings_model = {} + search_settings_model['discovery'] = search_settings_discovery_model + search_settings_model['messages'] = search_settings_messages_model + search_settings_model['schema_mapping'] = search_settings_schema_mapping_model + search_settings_model['elastic_search'] = search_settings_elastic_search_model + search_settings_model['conversational_search'] = search_settings_conversational_search_model + search_settings_model['server_side_search'] = search_settings_server_side_search_model + search_settings_model['client_side_search'] = search_settings_client_side_search_model + + # Construct a dict representation of a SkillImport model + skill_import_model = {} + skill_import_model['name'] = 'testString' + skill_import_model['description'] = 'testString' + skill_import_model['workspace'] = {'anyKey': 'anyValue'} + skill_import_model['dialog_settings'] = {'anyKey': 'anyValue'} + skill_import_model['search_settings'] = search_settings_model + skill_import_model['language'] = 'testString' + skill_import_model['type'] = 'action' + + # Construct a dict representation of a AssistantState model + assistant_state_model = {} + assistant_state_model['action_disabled'] = True + assistant_state_model['dialog_disabled'] = True + + # Set up parameter values + assistant_id = 'testString' + assistant_skills = [skill_import_model] + assistant_state = assistant_state_model + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + "assistant_skills": assistant_skills, + "assistant_state": assistant_state, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.import_skills(**req_copy) + + def test_import_skills_value_error_with_retries(self): + # Enable retries and run test_import_skills_value_error. 
+ _service.enable_retries() + self.test_import_skills_value_error() + + # Disable retries and run test_import_skills_value_error. + _service.disable_retries() + self.test_import_skills_value_error() + + +class TestImportSkillsStatus: + """ + Test Class for import_skills_status + """ + + @responses.activate + def test_import_skills_status_all_params(self): + """ + import_skills_status() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills_import/status') + mock_response = '{"assistant_id": "assistant_id", "status": "Available", "status_description": "status_description", "status_errors": [{"message": "message"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Invoke method + response = _service.import_skills_status( + assistant_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_import_skills_status_all_params_with_retries(self): + # Enable retries and run test_import_skills_status_all_params. + _service.enable_retries() + self.test_import_skills_status_all_params() + + # Disable retries and run test_import_skills_status_all_params. + _service.disable_retries() + self.test_import_skills_status_all_params() + + @responses.activate + def test_import_skills_status_value_error(self): + """ + test_import_skills_status_value_error() + """ + # Set up mock + url = preprocess_url('/v2/assistants/testString/skills_import/status') + mock_response = '{"assistant_id": "assistant_id", "status": "Available", "status_description": "status_description", "status_errors": [{"message": "message"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + assistant_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "assistant_id": assistant_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.import_skills_status(**req_copy) + + def test_import_skills_status_value_error_with_retries(self): + # Enable retries and run test_import_skills_status_value_error. + _service.enable_retries() + self.test_import_skills_status_value_error() + + # Disable retries and run test_import_skills_status_value_error. 
+ _service.disable_retries() + self.test_import_skills_status_value_error() + + +# endregion +############################################################################## +# End of Service: Skills +############################################################################## + + +############################################################################## +# Start of Model Tests +############################################################################## +# region + + +class TestModel_AgentAvailabilityMessage: + """ + Test Class for AgentAvailabilityMessage + """ + + def test_agent_availability_message_serialization(self): + """ + Test serialization/deserialization for AgentAvailabilityMessage + """ + + # Construct a json representation of a AgentAvailabilityMessage model + agent_availability_message_model_json = {} + agent_availability_message_model_json['message'] = 'testString' + + # Construct a model instance of AgentAvailabilityMessage by calling from_dict on the json representation + agent_availability_message_model = AgentAvailabilityMessage.from_dict(agent_availability_message_model_json) + assert agent_availability_message_model != False + + # Construct a model instance of AgentAvailabilityMessage by calling from_dict on the json representation + agent_availability_message_model_dict = AgentAvailabilityMessage.from_dict(agent_availability_message_model_json).__dict__ + agent_availability_message_model2 = AgentAvailabilityMessage(**agent_availability_message_model_dict) + + # Verify the model instances are equivalent + assert agent_availability_message_model == agent_availability_message_model2 + + # Convert model instance back to dict and verify no loss of data + agent_availability_message_model_json2 = agent_availability_message_model.to_dict() + assert agent_availability_message_model_json2 == agent_availability_message_model_json + + +class TestModel_AssistantCollection: + """ + Test Class for AssistantCollection + """ + + def test_assistant_collection_serialization(self): + """ + Test serialization/deserialization for AssistantCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
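+        # An AssistantCollection wraps a list of AssistantData dicts plus a Pagination dict, so both child models are built first.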
+ + assistant_data_model = {} # AssistantData + assistant_data_model['name'] = 'testString' + assistant_data_model['description'] = 'testString' + assistant_data_model['language'] = 'testString' + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a AssistantCollection model + assistant_collection_model_json = {} + assistant_collection_model_json['assistants'] = [assistant_data_model] + assistant_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of AssistantCollection by calling from_dict on the json representation + assistant_collection_model = AssistantCollection.from_dict(assistant_collection_model_json) + assert assistant_collection_model != False + + # Construct a model instance of AssistantCollection by calling from_dict on the json representation + assistant_collection_model_dict = AssistantCollection.from_dict(assistant_collection_model_json).__dict__ + assistant_collection_model2 = AssistantCollection(**assistant_collection_model_dict) + + # Verify the model instances are equivalent + assert assistant_collection_model == assistant_collection_model2 + + # Convert model instance back to dict and verify no loss of data + assistant_collection_model_json2 = assistant_collection_model.to_dict() + assert assistant_collection_model_json2 == assistant_collection_model_json + + +class TestModel_AssistantData: + """ + Test Class for AssistantData + """ + + def test_assistant_data_serialization(self): + """ + Test serialization/deserialization for AssistantData + """ + + # Construct a json representation of a AssistantData model + assistant_data_model_json = {} + assistant_data_model_json['name'] = 'testString' + assistant_data_model_json['description'] = 'testString' + assistant_data_model_json['language'] = 'testString' + + # Construct a model instance of AssistantData by calling from_dict on the json representation + assistant_data_model = AssistantData.from_dict(assistant_data_model_json) + assert assistant_data_model != False + + # Construct a model instance of AssistantData by calling from_dict on the json representation + assistant_data_model_dict = AssistantData.from_dict(assistant_data_model_json).__dict__ + assistant_data_model2 = AssistantData(**assistant_data_model_dict) + + # Verify the model instances are equivalent + assert assistant_data_model == assistant_data_model2 + + # Convert model instance back to dict and verify no loss of data + assistant_data_model_json2 = assistant_data_model.to_dict() + assert assistant_data_model_json2 == assistant_data_model_json + + +class TestModel_AssistantSkill: + """ + Test Class for AssistantSkill + """ + + def test_assistant_skill_serialization(self): + """ + Test serialization/deserialization for AssistantSkill + """ + + # Construct a json representation of a AssistantSkill model + assistant_skill_model_json = {} + assistant_skill_model_json['skill_id'] = 'testString' + assistant_skill_model_json['type'] = 'dialog' + + # Construct a model instance of AssistantSkill by calling from_dict on the json representation + assistant_skill_model = AssistantSkill.from_dict(assistant_skill_model_json) + assert assistant_skill_model != False + + # Construct a model instance of AssistantSkill by calling from_dict on the json 
representation + assistant_skill_model_dict = AssistantSkill.from_dict(assistant_skill_model_json).__dict__ + assistant_skill_model2 = AssistantSkill(**assistant_skill_model_dict) + + # Verify the model instances are equivalent + assert assistant_skill_model == assistant_skill_model2 + + # Convert model instance back to dict and verify no loss of data + assistant_skill_model_json2 = assistant_skill_model.to_dict() + assert assistant_skill_model_json2 == assistant_skill_model_json + + +class TestModel_AssistantState: + """ + Test Class for AssistantState + """ + + def test_assistant_state_serialization(self): + """ + Test serialization/deserialization for AssistantState + """ + + # Construct a json representation of a AssistantState model + assistant_state_model_json = {} + assistant_state_model_json['action_disabled'] = True + assistant_state_model_json['dialog_disabled'] = True + + # Construct a model instance of AssistantState by calling from_dict on the json representation + assistant_state_model = AssistantState.from_dict(assistant_state_model_json) + assert assistant_state_model != False + + # Construct a model instance of AssistantState by calling from_dict on the json representation + assistant_state_model_dict = AssistantState.from_dict(assistant_state_model_json).__dict__ + assistant_state_model2 = AssistantState(**assistant_state_model_dict) + + # Verify the model instances are equivalent + assert assistant_state_model == assistant_state_model2 + + # Convert model instance back to dict and verify no loss of data + assistant_state_model_json2 = assistant_state_model.to_dict() + assert assistant_state_model_json2 == assistant_state_model_json + + +class TestModel_BaseEnvironmentOrchestration: + """ + Test Class for BaseEnvironmentOrchestration + """ + + def test_base_environment_orchestration_serialization(self): + """ + Test serialization/deserialization for BaseEnvironmentOrchestration + """ + + # Construct a json representation of a BaseEnvironmentOrchestration model + base_environment_orchestration_model_json = {} + base_environment_orchestration_model_json['search_skill_fallback'] = True + + # Construct a model instance of BaseEnvironmentOrchestration by calling from_dict on the json representation + base_environment_orchestration_model = BaseEnvironmentOrchestration.from_dict(base_environment_orchestration_model_json) + assert base_environment_orchestration_model != False + + # Construct a model instance of BaseEnvironmentOrchestration by calling from_dict on the json representation + base_environment_orchestration_model_dict = BaseEnvironmentOrchestration.from_dict(base_environment_orchestration_model_json).__dict__ + base_environment_orchestration_model2 = BaseEnvironmentOrchestration(**base_environment_orchestration_model_dict) + + # Verify the model instances are equivalent + assert base_environment_orchestration_model == base_environment_orchestration_model2 + + # Convert model instance back to dict and verify no loss of data + base_environment_orchestration_model_json2 = base_environment_orchestration_model.to_dict() + assert base_environment_orchestration_model_json2 == base_environment_orchestration_model_json + + +class TestModel_BaseEnvironmentReleaseReference: + """ + Test Class for BaseEnvironmentReleaseReference + """ + + def test_base_environment_release_reference_serialization(self): + """ + Test serialization/deserialization for BaseEnvironmentReleaseReference + """ + + # Construct a json representation of a BaseEnvironmentReleaseReference model + 
base_environment_release_reference_model_json = {} + base_environment_release_reference_model_json['release'] = 'testString' + + # Construct a model instance of BaseEnvironmentReleaseReference by calling from_dict on the json representation + base_environment_release_reference_model = BaseEnvironmentReleaseReference.from_dict(base_environment_release_reference_model_json) + assert base_environment_release_reference_model != False + + # Construct a model instance of BaseEnvironmentReleaseReference by calling from_dict on the json representation + base_environment_release_reference_model_dict = BaseEnvironmentReleaseReference.from_dict(base_environment_release_reference_model_json).__dict__ + base_environment_release_reference_model2 = BaseEnvironmentReleaseReference(**base_environment_release_reference_model_dict) + + # Verify the model instances are equivalent + assert base_environment_release_reference_model == base_environment_release_reference_model2 + + # Convert model instance back to dict and verify no loss of data + base_environment_release_reference_model_json2 = base_environment_release_reference_model.to_dict() + assert base_environment_release_reference_model_json2 == base_environment_release_reference_model_json + + +class TestModel_BulkClassifyOutput: + """ + Test Class for BulkClassifyOutput + """ + + def test_bulk_classify_output_serialization(self): + """ + Test serialization/deserialization for BulkClassifyOutput + """ + + # Construct dict forms of any model objects needed in order to build this model. + + bulk_classify_utterance_model = {} # BulkClassifyUtterance + bulk_classify_utterance_model['text'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + 
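+        # The alternative, role, and entity dicts that follow fold the interpretation model above into a complete RuntimeEntity, which the BulkClassifyOutput payload then references alongside the intents.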
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + # Construct a json representation of a BulkClassifyOutput model + bulk_classify_output_model_json = {} + bulk_classify_output_model_json['input'] = bulk_classify_utterance_model + bulk_classify_output_model_json['entities'] = [runtime_entity_model] + bulk_classify_output_model_json['intents'] = [runtime_intent_model] + + # Construct a model instance of BulkClassifyOutput by calling from_dict on the json representation + bulk_classify_output_model = BulkClassifyOutput.from_dict(bulk_classify_output_model_json) + assert bulk_classify_output_model != False + + # Construct a model instance of BulkClassifyOutput by calling from_dict on the json representation + bulk_classify_output_model_dict = BulkClassifyOutput.from_dict(bulk_classify_output_model_json).__dict__ + bulk_classify_output_model2 = BulkClassifyOutput(**bulk_classify_output_model_dict) + + # Verify the model instances are equivalent + assert bulk_classify_output_model == bulk_classify_output_model2 + + # Convert model instance back to dict and verify no loss of data + bulk_classify_output_model_json2 = bulk_classify_output_model.to_dict() + assert bulk_classify_output_model_json2 == bulk_classify_output_model_json + + +class TestModel_BulkClassifyResponse: + """ + Test Class for BulkClassifyResponse + """ + + def test_bulk_classify_response_serialization(self): + """ + Test serialization/deserialization for BulkClassifyResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
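+        # A BulkClassifyResponse holds a list of BulkClassifyOutput dicts, so the same utterance, entity, and intent building blocks are assembled again here.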
+ + bulk_classify_utterance_model = {} # BulkClassifyUtterance + bulk_classify_utterance_model['text'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + bulk_classify_output_model = {} # BulkClassifyOutput + bulk_classify_output_model['input'] = bulk_classify_utterance_model + bulk_classify_output_model['entities'] = [runtime_entity_model] + bulk_classify_output_model['intents'] = [runtime_intent_model] + + # Construct a json representation of a BulkClassifyResponse model + bulk_classify_response_model_json = {} + bulk_classify_response_model_json['output'] = [bulk_classify_output_model] + + # Construct a model instance of BulkClassifyResponse by calling from_dict on 
the json representation + bulk_classify_response_model = BulkClassifyResponse.from_dict(bulk_classify_response_model_json) + assert bulk_classify_response_model != False + + # Construct a model instance of BulkClassifyResponse by calling from_dict on the json representation + bulk_classify_response_model_dict = BulkClassifyResponse.from_dict(bulk_classify_response_model_json).__dict__ + bulk_classify_response_model2 = BulkClassifyResponse(**bulk_classify_response_model_dict) + + # Verify the model instances are equivalent + assert bulk_classify_response_model == bulk_classify_response_model2 + + # Convert model instance back to dict and verify no loss of data + bulk_classify_response_model_json2 = bulk_classify_response_model.to_dict() + assert bulk_classify_response_model_json2 == bulk_classify_response_model_json + + +class TestModel_BulkClassifyUtterance: + """ + Test Class for BulkClassifyUtterance + """ + + def test_bulk_classify_utterance_serialization(self): + """ + Test serialization/deserialization for BulkClassifyUtterance + """ + + # Construct a json representation of a BulkClassifyUtterance model + bulk_classify_utterance_model_json = {} + bulk_classify_utterance_model_json['text'] = 'testString' + + # Construct a model instance of BulkClassifyUtterance by calling from_dict on the json representation + bulk_classify_utterance_model = BulkClassifyUtterance.from_dict(bulk_classify_utterance_model_json) + assert bulk_classify_utterance_model != False + + # Construct a model instance of BulkClassifyUtterance by calling from_dict on the json representation + bulk_classify_utterance_model_dict = BulkClassifyUtterance.from_dict(bulk_classify_utterance_model_json).__dict__ + bulk_classify_utterance_model2 = BulkClassifyUtterance(**bulk_classify_utterance_model_dict) + + # Verify the model instances are equivalent + assert bulk_classify_utterance_model == bulk_classify_utterance_model2 + + # Convert model instance back to dict and verify no loss of data + bulk_classify_utterance_model_json2 = bulk_classify_utterance_model.to_dict() + assert bulk_classify_utterance_model_json2 == bulk_classify_utterance_model_json + + +class TestModel_CaptureGroup: + """ + Test Class for CaptureGroup + """ + + def test_capture_group_serialization(self): + """ + Test serialization/deserialization for CaptureGroup + """ + + # Construct a json representation of a CaptureGroup model + capture_group_model_json = {} + capture_group_model_json['group'] = 'testString' + capture_group_model_json['location'] = [38] + + # Construct a model instance of CaptureGroup by calling from_dict on the json representation + capture_group_model = CaptureGroup.from_dict(capture_group_model_json) + assert capture_group_model != False + + # Construct a model instance of CaptureGroup by calling from_dict on the json representation + capture_group_model_dict = CaptureGroup.from_dict(capture_group_model_json).__dict__ + capture_group_model2 = CaptureGroup(**capture_group_model_dict) + + # Verify the model instances are equivalent + assert capture_group_model == capture_group_model2 + + # Convert model instance back to dict and verify no loss of data + capture_group_model_json2 = capture_group_model.to_dict() + assert capture_group_model_json2 == capture_group_model_json + + +class TestModel_ChannelTransferInfo: + """ + Test Class for ChannelTransferInfo + """ + + def test_channel_transfer_info_serialization(self): + """ + Test serialization/deserialization for ChannelTransferInfo + """ + + # Construct dict forms of any model objects 
needed in order to build this model. + + channel_transfer_target_chat_model = {} # ChannelTransferTargetChat + channel_transfer_target_chat_model['url'] = 'testString' + + channel_transfer_target_model = {} # ChannelTransferTarget + channel_transfer_target_model['chat'] = channel_transfer_target_chat_model + + # Construct a json representation of a ChannelTransferInfo model + channel_transfer_info_model_json = {} + channel_transfer_info_model_json['target'] = channel_transfer_target_model + + # Construct a model instance of ChannelTransferInfo by calling from_dict on the json representation + channel_transfer_info_model = ChannelTransferInfo.from_dict(channel_transfer_info_model_json) + assert channel_transfer_info_model != False + + # Construct a model instance of ChannelTransferInfo by calling from_dict on the json representation + channel_transfer_info_model_dict = ChannelTransferInfo.from_dict(channel_transfer_info_model_json).__dict__ + channel_transfer_info_model2 = ChannelTransferInfo(**channel_transfer_info_model_dict) + + # Verify the model instances are equivalent + assert channel_transfer_info_model == channel_transfer_info_model2 + + # Convert model instance back to dict and verify no loss of data + channel_transfer_info_model_json2 = channel_transfer_info_model.to_dict() + assert channel_transfer_info_model_json2 == channel_transfer_info_model_json + + +class TestModel_ChannelTransferTarget: + """ + Test Class for ChannelTransferTarget + """ + + def test_channel_transfer_target_serialization(self): + """ + Test serialization/deserialization for ChannelTransferTarget + """ + + # Construct dict forms of any model objects needed in order to build this model. + + channel_transfer_target_chat_model = {} # ChannelTransferTargetChat + channel_transfer_target_chat_model['url'] = 'testString' + + # Construct a json representation of a ChannelTransferTarget model + channel_transfer_target_model_json = {} + channel_transfer_target_model_json['chat'] = channel_transfer_target_chat_model + + # Construct a model instance of ChannelTransferTarget by calling from_dict on the json representation + channel_transfer_target_model = ChannelTransferTarget.from_dict(channel_transfer_target_model_json) + assert channel_transfer_target_model != False + + # Construct a model instance of ChannelTransferTarget by calling from_dict on the json representation + channel_transfer_target_model_dict = ChannelTransferTarget.from_dict(channel_transfer_target_model_json).__dict__ + channel_transfer_target_model2 = ChannelTransferTarget(**channel_transfer_target_model_dict) + + # Verify the model instances are equivalent + assert channel_transfer_target_model == channel_transfer_target_model2 + + # Convert model instance back to dict and verify no loss of data + channel_transfer_target_model_json2 = channel_transfer_target_model.to_dict() + assert channel_transfer_target_model_json2 == channel_transfer_target_model_json + + +class TestModel_ChannelTransferTargetChat: + """ + Test Class for ChannelTransferTargetChat + """ + + def test_channel_transfer_target_chat_serialization(self): + """ + Test serialization/deserialization for ChannelTransferTargetChat + """ + + # Construct a json representation of a ChannelTransferTargetChat model + channel_transfer_target_chat_model_json = {} + channel_transfer_target_chat_model_json['url'] = 'testString' + + # Construct a model instance of ChannelTransferTargetChat by calling from_dict on the json representation + channel_transfer_target_chat_model = 
ChannelTransferTargetChat.from_dict(channel_transfer_target_chat_model_json) + assert channel_transfer_target_chat_model != False + + # Construct a model instance of ChannelTransferTargetChat by calling from_dict on the json representation + channel_transfer_target_chat_model_dict = ChannelTransferTargetChat.from_dict(channel_transfer_target_chat_model_json).__dict__ + channel_transfer_target_chat_model2 = ChannelTransferTargetChat(**channel_transfer_target_chat_model_dict) + + # Verify the model instances are equivalent + assert channel_transfer_target_chat_model == channel_transfer_target_chat_model2 + + # Convert model instance back to dict and verify no loss of data + channel_transfer_target_chat_model_json2 = channel_transfer_target_chat_model.to_dict() + assert channel_transfer_target_chat_model_json2 == channel_transfer_target_chat_model_json + + +class TestModel_ClientAction: + """ + Test Class for ClientAction + """ + + def test_client_action_serialization(self): + """ + Test serialization/deserialization for ClientAction + """ + + # Construct a json representation of a ClientAction model + client_action_model_json = {} + client_action_model_json['name'] = 'testString' + client_action_model_json['result_variable'] = 'testString' + client_action_model_json['type'] = 'testString' + client_action_model_json['skill'] = 'main skill' + client_action_model_json['parameters'] = {'anyKey': 'anyValue'} + + # Construct a model instance of ClientAction by calling from_dict on the json representation + client_action_model = ClientAction.from_dict(client_action_model_json) + assert client_action_model != False + + # Construct a model instance of ClientAction by calling from_dict on the json representation + client_action_model_dict = ClientAction.from_dict(client_action_model_json).__dict__ + client_action_model2 = ClientAction(**client_action_model_dict) + + # Verify the model instances are equivalent + assert client_action_model == client_action_model2 + + # Convert model instance back to dict and verify no loss of data + client_action_model_json2 = client_action_model.to_dict() + assert client_action_model_json2 == client_action_model_json + + +class TestModel_CreateAssistantReleaseImportResponse: + """ + Test Class for CreateAssistantReleaseImportResponse + """ + + def test_create_assistant_release_import_response_serialization(self): + """ + Test serialization/deserialization for CreateAssistantReleaseImportResponse + """ + + # Construct a json representation of a CreateAssistantReleaseImportResponse model + create_assistant_release_import_response_model_json = {} + create_assistant_release_import_response_model_json['skill_impact_in_draft'] = ['action'] + + # Construct a model instance of CreateAssistantReleaseImportResponse by calling from_dict on the json representation + create_assistant_release_import_response_model = CreateAssistantReleaseImportResponse.from_dict(create_assistant_release_import_response_model_json) + assert create_assistant_release_import_response_model != False + + # Construct a model instance of CreateAssistantReleaseImportResponse by calling from_dict on the json representation + create_assistant_release_import_response_model_dict = CreateAssistantReleaseImportResponse.from_dict(create_assistant_release_import_response_model_json).__dict__ + create_assistant_release_import_response_model2 = CreateAssistantReleaseImportResponse(**create_assistant_release_import_response_model_dict) + + # Verify the model instances are equivalent + assert 
create_assistant_release_import_response_model == create_assistant_release_import_response_model2 + + # Convert model instance back to dict and verify no loss of data + create_assistant_release_import_response_model_json2 = create_assistant_release_import_response_model.to_dict() + assert create_assistant_release_import_response_model_json2 == create_assistant_release_import_response_model_json + + +class TestModel_CreateReleaseExportWithStatusErrors: + """ + Test Class for CreateReleaseExportWithStatusErrors + """ + + def test_create_release_export_with_status_errors_serialization(self): + """ + Test serialization/deserialization for CreateReleaseExportWithStatusErrors + """ + + # Construct a json representation of a CreateReleaseExportWithStatusErrors model + create_release_export_with_status_errors_model_json = {} + + # Construct a model instance of CreateReleaseExportWithStatusErrors by calling from_dict on the json representation + create_release_export_with_status_errors_model = CreateReleaseExportWithStatusErrors.from_dict(create_release_export_with_status_errors_model_json) + assert create_release_export_with_status_errors_model != False + + # Construct a model instance of CreateReleaseExportWithStatusErrors by calling from_dict on the json representation + create_release_export_with_status_errors_model_dict = CreateReleaseExportWithStatusErrors.from_dict(create_release_export_with_status_errors_model_json).__dict__ + create_release_export_with_status_errors_model2 = CreateReleaseExportWithStatusErrors(**create_release_export_with_status_errors_model_dict) + + # Verify the model instances are equivalent + assert create_release_export_with_status_errors_model == create_release_export_with_status_errors_model2 + + # Convert model instance back to dict and verify no loss of data + create_release_export_with_status_errors_model_json2 = create_release_export_with_status_errors_model.to_dict() + assert create_release_export_with_status_errors_model_json2 == create_release_export_with_status_errors_model_json + + +class TestModel_DialogLogMessage: + """ + Test Class for DialogLogMessage + """ + + def test_dialog_log_message_serialization(self): + """ + Test serialization/deserialization for DialogLogMessage + """ + + # Construct dict forms of any model objects needed in order to build this model. 
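+        # DialogLogMessage nests a LogMessageSourceDialogNode dict as its 'source' field, so that child model is built first.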
+ + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + # Construct a json representation of a DialogLogMessage model + dialog_log_message_model_json = {} + dialog_log_message_model_json['level'] = 'info' + dialog_log_message_model_json['message'] = 'testString' + dialog_log_message_model_json['code'] = 'testString' + dialog_log_message_model_json['source'] = log_message_source_model + + # Construct a model instance of DialogLogMessage by calling from_dict on the json representation + dialog_log_message_model = DialogLogMessage.from_dict(dialog_log_message_model_json) + assert dialog_log_message_model != False + + # Construct a model instance of DialogLogMessage by calling from_dict on the json representation + dialog_log_message_model_dict = DialogLogMessage.from_dict(dialog_log_message_model_json).__dict__ + dialog_log_message_model2 = DialogLogMessage(**dialog_log_message_model_dict) + + # Verify the model instances are equivalent + assert dialog_log_message_model == dialog_log_message_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_log_message_model_json2 = dialog_log_message_model.to_dict() + assert dialog_log_message_model_json2 == dialog_log_message_model_json + + +class TestModel_DialogNodeAction: + """ + Test Class for DialogNodeAction + """ + + def test_dialog_node_action_serialization(self): + """ + Test serialization/deserialization for DialogNodeAction + """ + + # Construct a json representation of a DialogNodeAction model + dialog_node_action_model_json = {} + dialog_node_action_model_json['name'] = 'testString' + dialog_node_action_model_json['type'] = 'client' + dialog_node_action_model_json['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model_json['result_variable'] = 'testString' + dialog_node_action_model_json['credentials'] = 'testString' + + # Construct a model instance of DialogNodeAction by calling from_dict on the json representation + dialog_node_action_model = DialogNodeAction.from_dict(dialog_node_action_model_json) + assert dialog_node_action_model != False + + # Construct a model instance of DialogNodeAction by calling from_dict on the json representation + dialog_node_action_model_dict = DialogNodeAction.from_dict(dialog_node_action_model_json).__dict__ + dialog_node_action_model2 = DialogNodeAction(**dialog_node_action_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_action_model == dialog_node_action_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_action_model_json2 = dialog_node_action_model.to_dict() + assert dialog_node_action_model_json2 == dialog_node_action_model_json + + +class TestModel_DialogNodeOutputConnectToAgentTransferInfo: + """ + Test Class for DialogNodeOutputConnectToAgentTransferInfo + """ + + def test_dialog_node_output_connect_to_agent_transfer_info_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputConnectToAgentTransferInfo + """ + + # Construct a json representation of a DialogNodeOutputConnectToAgentTransferInfo model + dialog_node_output_connect_to_agent_transfer_info_model_json = {} + dialog_node_output_connect_to_agent_transfer_info_model_json['target'] = {'key1': {'anyKey': 'anyValue'}} + + # Construct a model instance of DialogNodeOutputConnectToAgentTransferInfo by calling from_dict on the json representation + 
dialog_node_output_connect_to_agent_transfer_info_model = DialogNodeOutputConnectToAgentTransferInfo.from_dict(dialog_node_output_connect_to_agent_transfer_info_model_json) + assert dialog_node_output_connect_to_agent_transfer_info_model != False + + # Construct a model instance of DialogNodeOutputConnectToAgentTransferInfo by calling from_dict on the json representation + dialog_node_output_connect_to_agent_transfer_info_model_dict = DialogNodeOutputConnectToAgentTransferInfo.from_dict(dialog_node_output_connect_to_agent_transfer_info_model_json).__dict__ + dialog_node_output_connect_to_agent_transfer_info_model2 = DialogNodeOutputConnectToAgentTransferInfo(**dialog_node_output_connect_to_agent_transfer_info_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_connect_to_agent_transfer_info_model == dialog_node_output_connect_to_agent_transfer_info_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_connect_to_agent_transfer_info_model_json2 = dialog_node_output_connect_to_agent_transfer_info_model.to_dict() + assert dialog_node_output_connect_to_agent_transfer_info_model_json2 == dialog_node_output_connect_to_agent_transfer_info_model_json + + +class TestModel_DialogNodeOutputOptionsElement: + """ + Test Class for DialogNodeOutputOptionsElement + """ + + def test_dialog_node_output_options_element_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputOptionsElement + """ + + # Construct dict forms of any model objects needed in order to build this model. + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + 
runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + message_input_model = {} # MessageInput + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + dialog_node_output_options_element_value_model = {} # DialogNodeOutputOptionsElementValue + dialog_node_output_options_element_value_model['input'] = message_input_model + + # Construct a json representation of a DialogNodeOutputOptionsElement model + dialog_node_output_options_element_model_json = {} + dialog_node_output_options_element_model_json['label'] = 'testString' + dialog_node_output_options_element_model_json['value'] = dialog_node_output_options_element_value_model + + # Construct a model instance of DialogNodeOutputOptionsElement by calling from_dict on the json representation + dialog_node_output_options_element_model = DialogNodeOutputOptionsElement.from_dict(dialog_node_output_options_element_model_json) + assert dialog_node_output_options_element_model != False + + # Construct a model instance of DialogNodeOutputOptionsElement by calling from_dict on the json representation + dialog_node_output_options_element_model_dict = 
DialogNodeOutputOptionsElement.from_dict(dialog_node_output_options_element_model_json).__dict__ + dialog_node_output_options_element_model2 = DialogNodeOutputOptionsElement(**dialog_node_output_options_element_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_options_element_model == dialog_node_output_options_element_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_options_element_model_json2 = dialog_node_output_options_element_model.to_dict() + assert dialog_node_output_options_element_model_json2 == dialog_node_output_options_element_model_json + + +class TestModel_DialogNodeOutputOptionsElementValue: + """ + Test Class for DialogNodeOutputOptionsElementValue + """ + + def test_dialog_node_output_options_element_value_serialization(self): + """ + Test serialization/deserialization for DialogNodeOutputOptionsElementValue + """ + + # Construct dict forms of any model objects needed in order to build this model. + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 
'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + message_input_model = {} # MessageInput + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + # Construct a json representation of a DialogNodeOutputOptionsElementValue model + dialog_node_output_options_element_value_model_json = {} + dialog_node_output_options_element_value_model_json['input'] = message_input_model + + # Construct a model instance of DialogNodeOutputOptionsElementValue by calling from_dict on the json representation + dialog_node_output_options_element_value_model = DialogNodeOutputOptionsElementValue.from_dict(dialog_node_output_options_element_value_model_json) + assert dialog_node_output_options_element_value_model != False + + # Construct a model instance of DialogNodeOutputOptionsElementValue by calling from_dict on the json representation + dialog_node_output_options_element_value_model_dict = DialogNodeOutputOptionsElementValue.from_dict(dialog_node_output_options_element_value_model_json).__dict__ + dialog_node_output_options_element_value_model2 = DialogNodeOutputOptionsElementValue(**dialog_node_output_options_element_value_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_output_options_element_value_model == dialog_node_output_options_element_value_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_output_options_element_value_model_json2 = dialog_node_output_options_element_value_model.to_dict() + assert dialog_node_output_options_element_value_model_json2 == dialog_node_output_options_element_value_model_json + + +class TestModel_DialogNodeVisited: + """ + Test Class for DialogNodeVisited + """ + + def test_dialog_node_visited_serialization(self): + """ + Test serialization/deserialization for 
DialogNodeVisited + """ + + # Construct a json representation of a DialogNodeVisited model + dialog_node_visited_model_json = {} + dialog_node_visited_model_json['dialog_node'] = 'testString' + dialog_node_visited_model_json['title'] = 'testString' + dialog_node_visited_model_json['conditions'] = 'testString' + + # Construct a model instance of DialogNodeVisited by calling from_dict on the json representation + dialog_node_visited_model = DialogNodeVisited.from_dict(dialog_node_visited_model_json) + assert dialog_node_visited_model != False + + # Construct a model instance of DialogNodeVisited by calling from_dict on the json representation + dialog_node_visited_model_dict = DialogNodeVisited.from_dict(dialog_node_visited_model_json).__dict__ + dialog_node_visited_model2 = DialogNodeVisited(**dialog_node_visited_model_dict) + + # Verify the model instances are equivalent + assert dialog_node_visited_model == dialog_node_visited_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_node_visited_model_json2 = dialog_node_visited_model.to_dict() + assert dialog_node_visited_model_json2 == dialog_node_visited_model_json + + +class TestModel_DialogSuggestion: + """ + Test Class for DialogSuggestion + """ + + def test_dialog_suggestion_serialization(self): + """ + Test serialization/deserialization for DialogSuggestion + """ + + # Construct dict forms of any model objects needed in order to build this model. + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + 
runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + message_input_model = {} # MessageInput + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + dialog_suggestion_value_model = {} # DialogSuggestionValue + dialog_suggestion_value_model['input'] = message_input_model + + # Construct a json representation of a DialogSuggestion model + dialog_suggestion_model_json = {} + dialog_suggestion_model_json['label'] = 'testString' + dialog_suggestion_model_json['value'] = dialog_suggestion_value_model + dialog_suggestion_model_json['output'] = {'anyKey': 'anyValue'} + + # Construct a model instance of DialogSuggestion by calling from_dict on the json representation + dialog_suggestion_model = DialogSuggestion.from_dict(dialog_suggestion_model_json) + assert dialog_suggestion_model != False + + # Construct a model instance of DialogSuggestion by calling from_dict on the json representation + dialog_suggestion_model_dict = DialogSuggestion.from_dict(dialog_suggestion_model_json).__dict__ + dialog_suggestion_model2 = DialogSuggestion(**dialog_suggestion_model_dict) + + # Verify the model instances are equivalent + assert dialog_suggestion_model == dialog_suggestion_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_suggestion_model_json2 = dialog_suggestion_model.to_dict() + 
assert dialog_suggestion_model_json2 == dialog_suggestion_model_json + + +class TestModel_DialogSuggestionValue: + """ + Test Class for DialogSuggestionValue + """ + + def test_dialog_suggestion_value_serialization(self): + """ + Test serialization/deserialization for DialogSuggestionValue + """ + + # Construct dict forms of any model objects needed in order to build this model. + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + 
request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + message_input_model = {} # MessageInput + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + # Construct a json representation of a DialogSuggestionValue model + dialog_suggestion_value_model_json = {} + dialog_suggestion_value_model_json['input'] = message_input_model + + # Construct a model instance of DialogSuggestionValue by calling from_dict on the json representation + dialog_suggestion_value_model = DialogSuggestionValue.from_dict(dialog_suggestion_value_model_json) + assert dialog_suggestion_value_model != False + + # Construct a model instance of DialogSuggestionValue by calling from_dict on the json representation + dialog_suggestion_value_model_dict = DialogSuggestionValue.from_dict(dialog_suggestion_value_model_json).__dict__ + dialog_suggestion_value_model2 = DialogSuggestionValue(**dialog_suggestion_value_model_dict) + + # Verify the model instances are equivalent + assert dialog_suggestion_value_model == dialog_suggestion_value_model2 + + # Convert model instance back to dict and verify no loss of data + dialog_suggestion_value_model_json2 = dialog_suggestion_value_model.to_dict() + assert dialog_suggestion_value_model_json2 == dialog_suggestion_value_model_json + + +class TestModel_DtmfCommandInfo: + """ + Test Class for DtmfCommandInfo + """ + + def test_dtmf_command_info_serialization(self): + """ + Test serialization/deserialization for DtmfCommandInfo + """ + + # Construct a json representation of a DtmfCommandInfo model + dtmf_command_info_model_json = {} + dtmf_command_info_model_json['type'] = 'collect' + dtmf_command_info_model_json['parameters'] = {'anyKey': 'anyValue'} + + # Construct a model instance of DtmfCommandInfo by calling from_dict on the json representation + dtmf_command_info_model = DtmfCommandInfo.from_dict(dtmf_command_info_model_json) + assert dtmf_command_info_model != False + + # Construct a model instance of DtmfCommandInfo by calling from_dict on the json representation + dtmf_command_info_model_dict = DtmfCommandInfo.from_dict(dtmf_command_info_model_json).__dict__ + dtmf_command_info_model2 = DtmfCommandInfo(**dtmf_command_info_model_dict) + + # Verify the model instances are equivalent + assert dtmf_command_info_model == dtmf_command_info_model2 + + # Convert model instance back to dict and verify no loss of data + dtmf_command_info_model_json2 = 
dtmf_command_info_model.to_dict() + assert dtmf_command_info_model_json2 == dtmf_command_info_model_json + + +class TestModel_Environment: + """ + Test Class for Environment + """ + + def test_environment_serialization(self): + """ + Test serialization/deserialization for Environment + """ + + # Construct dict forms of any model objects needed in order to build this model. + + base_environment_orchestration_model = {} # BaseEnvironmentOrchestration + base_environment_orchestration_model['search_skill_fallback'] = True + + environment_skill_model = {} # EnvironmentSkill + environment_skill_model['skill_id'] = 'testString' + environment_skill_model['type'] = 'dialog' + environment_skill_model['disabled'] = True + environment_skill_model['snapshot'] = 'testString' + environment_skill_model['skill_reference'] = 'testString' + + # Construct a json representation of a Environment model + environment_model_json = {} + environment_model_json['name'] = 'testString' + environment_model_json['description'] = 'testString' + environment_model_json['orchestration'] = base_environment_orchestration_model + environment_model_json['session_timeout'] = 10 + environment_model_json['skill_references'] = [environment_skill_model] + + # Construct a model instance of Environment by calling from_dict on the json representation + environment_model = Environment.from_dict(environment_model_json) + assert environment_model != False + + # Construct a model instance of Environment by calling from_dict on the json representation + environment_model_dict = Environment.from_dict(environment_model_json).__dict__ + environment_model2 = Environment(**environment_model_dict) + + # Verify the model instances are equivalent + assert environment_model == environment_model2 + + # Convert model instance back to dict and verify no loss of data + environment_model_json2 = environment_model.to_dict() + assert environment_model_json2 == environment_model_json + + +class TestModel_EnvironmentCollection: + """ + Test Class for EnvironmentCollection + """ + + def test_environment_collection_serialization(self): + """ + Test serialization/deserialization for EnvironmentCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + base_environment_orchestration_model = {} # BaseEnvironmentOrchestration + base_environment_orchestration_model['search_skill_fallback'] = True + + environment_skill_model = {} # EnvironmentSkill + environment_skill_model['skill_id'] = 'testString' + environment_skill_model['type'] = 'dialog' + environment_skill_model['disabled'] = True + environment_skill_model['snapshot'] = 'testString' + environment_skill_model['skill_reference'] = 'testString' + + environment_model = {} # Environment + environment_model['name'] = 'testString' + environment_model['description'] = 'testString' + environment_model['orchestration'] = base_environment_orchestration_model + environment_model['session_timeout'] = 10 + environment_model['skill_references'] = [environment_skill_model] + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a EnvironmentCollection model + environment_collection_model_json = {} + environment_collection_model_json['environments'] = [environment_model] + environment_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of EnvironmentCollection by calling from_dict on the json representation + environment_collection_model = EnvironmentCollection.from_dict(environment_collection_model_json) + assert environment_collection_model != False + + # Construct a model instance of EnvironmentCollection by calling from_dict on the json representation + environment_collection_model_dict = EnvironmentCollection.from_dict(environment_collection_model_json).__dict__ + environment_collection_model2 = EnvironmentCollection(**environment_collection_model_dict) + + # Verify the model instances are equivalent + assert environment_collection_model == environment_collection_model2 + + # Convert model instance back to dict and verify no loss of data + environment_collection_model_json2 = environment_collection_model.to_dict() + assert environment_collection_model_json2 == environment_collection_model_json + + +class TestModel_EnvironmentReference: + """ + Test Class for EnvironmentReference + """ + + def test_environment_reference_serialization(self): + """ + Test serialization/deserialization for EnvironmentReference + """ + + # Construct a json representation of a EnvironmentReference model + environment_reference_model_json = {} + environment_reference_model_json['name'] = 'testString' + + # Construct a model instance of EnvironmentReference by calling from_dict on the json representation + environment_reference_model = EnvironmentReference.from_dict(environment_reference_model_json) + assert environment_reference_model != False + + # Construct a model instance of EnvironmentReference by calling from_dict on the json representation + environment_reference_model_dict = EnvironmentReference.from_dict(environment_reference_model_json).__dict__ + environment_reference_model2 = EnvironmentReference(**environment_reference_model_dict) + + # Verify the model instances are equivalent + assert environment_reference_model == environment_reference_model2 + + # Convert model instance back to dict and verify no loss of data + environment_reference_model_json2 = environment_reference_model.to_dict() + assert environment_reference_model_json2 == environment_reference_model_json + + +class 
TestModel_EnvironmentSkill: + """ + Test Class for EnvironmentSkill + """ + + def test_environment_skill_serialization(self): + """ + Test serialization/deserialization for EnvironmentSkill + """ + + # Construct a json representation of a EnvironmentSkill model + environment_skill_model_json = {} + environment_skill_model_json['skill_id'] = 'testString' + environment_skill_model_json['type'] = 'dialog' + environment_skill_model_json['disabled'] = True + environment_skill_model_json['snapshot'] = 'testString' + environment_skill_model_json['skill_reference'] = 'testString' + + # Construct a model instance of EnvironmentSkill by calling from_dict on the json representation + environment_skill_model = EnvironmentSkill.from_dict(environment_skill_model_json) + assert environment_skill_model != False + + # Construct a model instance of EnvironmentSkill by calling from_dict on the json representation + environment_skill_model_dict = EnvironmentSkill.from_dict(environment_skill_model_json).__dict__ + environment_skill_model2 = EnvironmentSkill(**environment_skill_model_dict) + + # Verify the model instances are equivalent + assert environment_skill_model == environment_skill_model2 + + # Convert model instance back to dict and verify no loss of data + environment_skill_model_json2 = environment_skill_model.to_dict() + assert environment_skill_model_json2 == environment_skill_model_json + + +class TestModel_FinalResponse: + """ + Test Class for FinalResponse + """ + + def test_final_response_serialization(self): + """ + Test serialization/deserialization for FinalResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. + + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = 
response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + 
dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + metadata_model = {} # Metadata + metadata_model['id'] = 38 + + message_stream_metadata_model = {} # MessageStreamMetadata + message_stream_metadata_model['streaming_metadata'] = metadata_model + + final_response_output_model = {} # FinalResponseOutput + final_response_output_model['generic'] = [runtime_response_generic_model] + final_response_output_model['intents'] = [runtime_intent_model] + final_response_output_model['entities'] = [runtime_entity_model] + final_response_output_model['actions'] = [dialog_node_action_model] + final_response_output_model['debug'] = message_output_debug_model + final_response_output_model['user_defined'] = {'anyKey': 'anyValue'} + final_response_output_model['spelling'] = message_output_spelling_model + final_response_output_model['llm_metadata'] = [message_output_llm_metadata_model] + final_response_output_model['streaming_metadata'] = message_stream_metadata_model + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + 
message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + message_context_global_model = {} # MessageContextGlobal + message_context_global_model['system'] = message_context_global_system_model + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + message_context_action_skill_model = {} # MessageContextActionSkill + message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + message_context_skills_model = {} # MessageContextSkills + message_context_skills_model['main skill'] = message_context_dialog_skill_model + message_context_skills_model['actions skill'] = message_context_action_skill_model + + message_context_model = {} # MessageContext + message_context_model['global'] = message_context_global_model + message_context_model['skills'] = message_context_skills_model + message_context_model['integrations'] = {'anyKey': 'anyValue'} + + message_output_model = {} # MessageOutput + message_output_model['generic'] = [runtime_response_generic_model] + message_output_model['intents'] = [runtime_intent_model] + message_output_model['entities'] = [runtime_entity_model] + message_output_model['actions'] = [dialog_node_action_model] + message_output_model['debug'] = message_output_debug_model + message_output_model['user_defined'] = {'anyKey': 'anyValue'} + message_output_model['spelling'] = message_output_spelling_model + message_output_model['llm_metadata'] = [message_output_llm_metadata_model] + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + message_input_model = {} # MessageInput + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = 
[runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + # Construct a json representation of a FinalResponse model + final_response_model_json = {} + final_response_model_json['output'] = final_response_output_model + final_response_model_json['context'] = message_context_model + final_response_model_json['user_id'] = 'testString' + final_response_model_json['masked_output'] = message_output_model + final_response_model_json['masked_input'] = message_input_model + + # Construct a model instance of FinalResponse by calling from_dict on the json representation + final_response_model = FinalResponse.from_dict(final_response_model_json) + assert final_response_model != False + + # Construct a model instance of FinalResponse by calling from_dict on the json representation + final_response_model_dict = FinalResponse.from_dict(final_response_model_json).__dict__ + final_response_model2 = FinalResponse(**final_response_model_dict) + + # Verify the model instances are equivalent + assert final_response_model == final_response_model2 + + # Convert model instance back to dict and verify no loss of data + final_response_model_json2 = final_response_model.to_dict() + assert final_response_model_json2 == final_response_model_json + + +class TestModel_FinalResponseOutput: + """ + Test Class for FinalResponseOutput + """ + + def test_final_response_output_serialization(self): + """ + Test serialization/deserialization for FinalResponseOutput + """ + + # Construct dict forms of any model objects needed in order to build this model. + + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = 
[response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + 
dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + metadata_model = {} # Metadata + metadata_model['id'] = 38 + + message_stream_metadata_model = {} # MessageStreamMetadata + message_stream_metadata_model['streaming_metadata'] = metadata_model + + # Construct a json representation of a FinalResponseOutput model + final_response_output_model_json = {} + final_response_output_model_json['generic'] = [runtime_response_generic_model] + final_response_output_model_json['intents'] = [runtime_intent_model] + final_response_output_model_json['entities'] = [runtime_entity_model] + final_response_output_model_json['actions'] = [dialog_node_action_model] + final_response_output_model_json['debug'] = message_output_debug_model + final_response_output_model_json['user_defined'] = {'anyKey': 'anyValue'} + final_response_output_model_json['spelling'] = message_output_spelling_model + final_response_output_model_json['llm_metadata'] = [message_output_llm_metadata_model] + final_response_output_model_json['streaming_metadata'] = message_stream_metadata_model + + # Construct a model instance of FinalResponseOutput by calling from_dict on the json representation + final_response_output_model = 
FinalResponseOutput.from_dict(final_response_output_model_json) + assert final_response_output_model != False + + # Construct a model instance of FinalResponseOutput by calling from_dict on the json representation + final_response_output_model_dict = FinalResponseOutput.from_dict(final_response_output_model_json).__dict__ + final_response_output_model2 = FinalResponseOutput(**final_response_output_model_dict) + + # Verify the model instances are equivalent + assert final_response_output_model == final_response_output_model2 + + # Convert model instance back to dict and verify no loss of data + final_response_output_model_json2 = final_response_output_model.to_dict() + assert final_response_output_model_json2 == final_response_output_model_json + + +class TestModel_GenerativeAITaskConfidenceScores: + """ + Test Class for GenerativeAITaskConfidenceScores + """ + + def test_generative_ai_task_confidence_scores_serialization(self): + """ + Test serialization/deserialization for GenerativeAITaskConfidenceScores + """ + + # Construct a json representation of a GenerativeAITaskConfidenceScores model + generative_ai_task_confidence_scores_model_json = {} + generative_ai_task_confidence_scores_model_json['pre_gen'] = 72.5 + generative_ai_task_confidence_scores_model_json['pre_gen_threshold'] = 72.5 + generative_ai_task_confidence_scores_model_json['post_gen'] = 72.5 + generative_ai_task_confidence_scores_model_json['post_gen_threshold'] = 72.5 + + # Construct a model instance of GenerativeAITaskConfidenceScores by calling from_dict on the json representation + generative_ai_task_confidence_scores_model = GenerativeAITaskConfidenceScores.from_dict(generative_ai_task_confidence_scores_model_json) + assert generative_ai_task_confidence_scores_model != False + + # Construct a model instance of GenerativeAITaskConfidenceScores by calling from_dict on the json representation + generative_ai_task_confidence_scores_model_dict = GenerativeAITaskConfidenceScores.from_dict(generative_ai_task_confidence_scores_model_json).__dict__ + generative_ai_task_confidence_scores_model2 = GenerativeAITaskConfidenceScores(**generative_ai_task_confidence_scores_model_dict) + + # Verify the model instances are equivalent + assert generative_ai_task_confidence_scores_model == generative_ai_task_confidence_scores_model2 + + # Convert model instance back to dict and verify no loss of data + generative_ai_task_confidence_scores_model_json2 = generative_ai_task_confidence_scores_model.to_dict() + assert generative_ai_task_confidence_scores_model_json2 == generative_ai_task_confidence_scores_model_json + + +class TestModel_IntegrationReference: + """ + Test Class for IntegrationReference + """ + + def test_integration_reference_serialization(self): + """ + Test serialization/deserialization for IntegrationReference + """ + + # Construct a json representation of a IntegrationReference model + integration_reference_model_json = {} + integration_reference_model_json['integration_id'] = 'testString' + integration_reference_model_json['type'] = 'testString' + + # Construct a model instance of IntegrationReference by calling from_dict on the json representation + integration_reference_model = IntegrationReference.from_dict(integration_reference_model_json) + assert integration_reference_model != False + + # Construct a model instance of IntegrationReference by calling from_dict on the json representation + integration_reference_model_dict = IntegrationReference.from_dict(integration_reference_model_json).__dict__ + 
integration_reference_model2 = IntegrationReference(**integration_reference_model_dict) + + # Verify the model instances are equivalent + assert integration_reference_model == integration_reference_model2 + + # Convert model instance back to dict and verify no loss of data + integration_reference_model_json2 = integration_reference_model.to_dict() + assert integration_reference_model_json2 == integration_reference_model_json + + +class TestModel_Log: + """ + Test Class for Log + """ + + def test_log_serialization(self): + """ + Test serialization/deserialization for Log + """ + + # Construct dict forms of any model objects needed in order to build this model. + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + 
runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + log_request_input_model = {} # LogRequestInput + log_request_input_model['message_type'] = 'text' + log_request_input_model['text'] = 'testString' + log_request_input_model['intents'] = [runtime_intent_model] + log_request_input_model['entities'] = [runtime_entity_model] + log_request_input_model['suggestion_id'] = 'testString' + log_request_input_model['attachments'] = [message_input_attachment_model] + log_request_input_model['analytics'] = request_analytics_model + log_request_input_model['options'] = message_input_options_model + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + message_context_global_model = {} # MessageContextGlobal + message_context_global_model['system'] = message_context_global_system_model + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + message_context_action_skill_model = {} # MessageContextActionSkill + message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + message_context_skills_model = {} # MessageContextSkills + message_context_skills_model['main skill'] = message_context_dialog_skill_model + message_context_skills_model['actions skill'] = message_context_action_skill_model + + message_context_model = {} # MessageContext + message_context_model['global'] = message_context_global_model + 
message_context_model['skills'] = message_context_skills_model + message_context_model['integrations'] = {'anyKey': 'anyValue'} + + log_request_model = {} # LogRequest + log_request_model['input'] = log_request_input_model + log_request_model['context'] = message_context_model + log_request_model['user_id'] = 'testString' + + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + 
turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + log_response_output_model = {} # LogResponseOutput + log_response_output_model['generic'] = [runtime_response_generic_model] + log_response_output_model['intents'] = [runtime_intent_model] + log_response_output_model['entities'] = [runtime_entity_model] + log_response_output_model['actions'] = [dialog_node_action_model] + log_response_output_model['debug'] = message_output_debug_model + log_response_output_model['user_defined'] = {'anyKey': 'anyValue'} + log_response_output_model['spelling'] = message_output_spelling_model + log_response_output_model['llm_metadata'] = [message_output_llm_metadata_model] + + log_response_model = {} # LogResponse + log_response_model['output'] = log_response_output_model + log_response_model['context'] = message_context_model + log_response_model['user_id'] = 'testString' + + # Construct a json representation of a Log model + log_model_json = {} + log_model_json['log_id'] = 'testString' + log_model_json['request'] = log_request_model + log_model_json['response'] = log_response_model + log_model_json['assistant_id'] = 'testString' + log_model_json['session_id'] = 'testString' + log_model_json['skill_id'] = 'testString' + log_model_json['snapshot'] = 'testString' + log_model_json['request_timestamp'] = 'testString' + log_model_json['response_timestamp'] = 'testString' + log_model_json['language'] = 'testString' + log_model_json['customer_id'] = 'testString' + + # Construct a model instance of Log by calling from_dict on the json representation + log_model = Log.from_dict(log_model_json) + assert log_model != False + + # Construct a model instance of Log by calling from_dict on the json representation + log_model_dict = Log.from_dict(log_model_json).__dict__ + log_model2 = Log(**log_model_dict) + + # Verify the model instances are equivalent + assert log_model == log_model2 + + # Convert model instance back to dict and verify no loss of data + log_model_json2 = log_model.to_dict() + assert log_model_json2 == 
log_model_json + + +class TestModel_LogCollection: + """ + Test Class for LogCollection + """ + + def test_log_collection_serialization(self): + """ + Test serialization/deserialization for LogCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + 
request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + log_request_input_model = {} # LogRequestInput + log_request_input_model['message_type'] = 'text' + log_request_input_model['text'] = 'testString' + log_request_input_model['intents'] = [runtime_intent_model] + log_request_input_model['entities'] = [runtime_entity_model] + log_request_input_model['suggestion_id'] = 'testString' + log_request_input_model['attachments'] = [message_input_attachment_model] + log_request_input_model['analytics'] = request_analytics_model + log_request_input_model['options'] = message_input_options_model + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + message_context_global_model = {} # MessageContextGlobal + message_context_global_model['system'] = message_context_global_system_model + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + message_context_action_skill_model = {} # MessageContextActionSkill + message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + message_context_skills_model = {} # MessageContextSkills + message_context_skills_model['main skill'] = message_context_dialog_skill_model + message_context_skills_model['actions skill'] = message_context_action_skill_model + + message_context_model = {} # MessageContext + message_context_model['global'] = message_context_global_model + message_context_model['skills'] = message_context_skills_model + message_context_model['integrations'] = {'anyKey': 'anyValue'} + + log_request_model = {} # LogRequest + log_request_model['input'] = log_request_input_model + log_request_model['context'] = message_context_model + log_request_model['user_id'] = 'testString' + + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem 
+ response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = 
turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + log_response_output_model = {} # LogResponseOutput + log_response_output_model['generic'] = [runtime_response_generic_model] + log_response_output_model['intents'] = [runtime_intent_model] + log_response_output_model['entities'] = [runtime_entity_model] + log_response_output_model['actions'] = [dialog_node_action_model] + log_response_output_model['debug'] = message_output_debug_model + log_response_output_model['user_defined'] = {'anyKey': 'anyValue'} + log_response_output_model['spelling'] = message_output_spelling_model + log_response_output_model['llm_metadata'] = [message_output_llm_metadata_model] + + log_response_model = {} # LogResponse + log_response_model['output'] = log_response_output_model + log_response_model['context'] = message_context_model + log_response_model['user_id'] = 'testString' + + log_model = {} # Log + log_model['log_id'] = 'testString' + log_model['request'] = log_request_model + log_model['response'] = log_response_model + log_model['assistant_id'] = 'testString' + log_model['session_id'] = 'testString' + log_model['skill_id'] = 'testString' + log_model['snapshot'] = 'testString' + log_model['request_timestamp'] = 'testString' + log_model['response_timestamp'] = 'testString' + log_model['language'] = 'testString' + log_model['customer_id'] = 'testString' + + log_pagination_model = {} # LogPagination + log_pagination_model['next_url'] = 'testString' + log_pagination_model['matched'] = 38 + log_pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a LogCollection model + log_collection_model_json = {} + log_collection_model_json['logs'] = [log_model] + log_collection_model_json['pagination'] = log_pagination_model + + # Construct a model instance of LogCollection by calling from_dict on the json representation + log_collection_model = LogCollection.from_dict(log_collection_model_json) + assert log_collection_model != False + + # Construct a model instance of LogCollection by calling from_dict on the json representation + log_collection_model_dict = LogCollection.from_dict(log_collection_model_json).__dict__ + log_collection_model2 = LogCollection(**log_collection_model_dict) + + # Verify the model instances are equivalent + assert log_collection_model == log_collection_model2 + + # Convert model instance back to dict and verify no loss of data + log_collection_model_json2 = 
log_collection_model.to_dict() + assert log_collection_model_json2 == log_collection_model_json + + +class TestModel_LogPagination: + """ + Test Class for LogPagination + """ + + def test_log_pagination_serialization(self): + """ + Test serialization/deserialization for LogPagination + """ + + # Construct a json representation of a LogPagination model + log_pagination_model_json = {} + log_pagination_model_json['next_url'] = 'testString' + log_pagination_model_json['matched'] = 38 + log_pagination_model_json['next_cursor'] = 'testString' + + # Construct a model instance of LogPagination by calling from_dict on the json representation + log_pagination_model = LogPagination.from_dict(log_pagination_model_json) + assert log_pagination_model != False + + # Construct a model instance of LogPagination by calling from_dict on the json representation + log_pagination_model_dict = LogPagination.from_dict(log_pagination_model_json).__dict__ + log_pagination_model2 = LogPagination(**log_pagination_model_dict) + + # Verify the model instances are equivalent + assert log_pagination_model == log_pagination_model2 + + # Convert model instance back to dict and verify no loss of data + log_pagination_model_json2 = log_pagination_model.to_dict() + assert log_pagination_model_json2 == log_pagination_model_json + + +class TestModel_LogRequest: + """ + Test Class for LogRequest + """ + + def test_log_request_serialization(self): + """ + Test serialization/deserialization for LogRequest + """ + + # Construct dict forms of any model objects needed in order to build this model. + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + 
runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = True + message_input_options_model['export'] = True + + log_request_input_model = {} # LogRequestInput + log_request_input_model['message_type'] = 'text' + log_request_input_model['text'] = 'Hello' + log_request_input_model['intents'] = [runtime_intent_model] + log_request_input_model['entities'] = [runtime_entity_model] + log_request_input_model['suggestion_id'] = 'testString' + log_request_input_model['attachments'] = [message_input_attachment_model] + log_request_input_model['analytics'] = request_analytics_model + log_request_input_model['options'] = message_input_options_model + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'my_user_id' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + message_context_global_model = {} # MessageContextGlobal + message_context_global_model['system'] = message_context_global_system_model + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + 
message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + message_context_action_skill_model = {} # MessageContextActionSkill + message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + message_context_skills_model = {} # MessageContextSkills + message_context_skills_model['main skill'] = message_context_dialog_skill_model + message_context_skills_model['actions skill'] = message_context_action_skill_model + + message_context_model = {} # MessageContext + message_context_model['global'] = message_context_global_model + message_context_model['skills'] = message_context_skills_model + message_context_model['integrations'] = {'anyKey': 'anyValue'} + + # Construct a json representation of a LogRequest model + log_request_model_json = {} + log_request_model_json['input'] = log_request_input_model + log_request_model_json['context'] = message_context_model + log_request_model_json['user_id'] = 'testString' + + # Construct a model instance of LogRequest by calling from_dict on the json representation + log_request_model = LogRequest.from_dict(log_request_model_json) + assert log_request_model != False + + # Construct a model instance of LogRequest by calling from_dict on the json representation + log_request_model_dict = LogRequest.from_dict(log_request_model_json).__dict__ + log_request_model2 = LogRequest(**log_request_model_dict) + + # Verify the model instances are equivalent + assert log_request_model == log_request_model2 + + # Convert model instance back to dict and verify no loss of data + log_request_model_json2 = log_request_model.to_dict() + assert log_request_model_json2 == log_request_model_json + + +class TestModel_LogRequestInput: + """ + Test Class for LogRequestInput + """ + + def test_log_request_input_serialization(self): + """ + Test serialization/deserialization for LogRequestInput + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # 
MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + # Construct a json representation of a LogRequestInput model + log_request_input_model_json = {} + log_request_input_model_json['message_type'] = 'text' + log_request_input_model_json['text'] = 'testString' + log_request_input_model_json['intents'] = [runtime_intent_model] + log_request_input_model_json['entities'] = [runtime_entity_model] + log_request_input_model_json['suggestion_id'] = 'testString' + log_request_input_model_json['attachments'] = [message_input_attachment_model] + log_request_input_model_json['analytics'] = request_analytics_model + log_request_input_model_json['options'] = message_input_options_model + + # Construct a model instance of LogRequestInput by calling from_dict on the json representation + log_request_input_model = LogRequestInput.from_dict(log_request_input_model_json) + assert log_request_input_model != False + + # Construct a model instance of LogRequestInput by calling from_dict on the json representation + log_request_input_model_dict = LogRequestInput.from_dict(log_request_input_model_json).__dict__ + log_request_input_model2 = LogRequestInput(**log_request_input_model_dict) + + # Verify the model instances are equivalent + assert log_request_input_model == log_request_input_model2 + + # Convert model instance back to dict and verify no loss of data + log_request_input_model_json2 = log_request_input_model.to_dict() + assert log_request_input_model_json2 == log_request_input_model_json + + +class TestModel_LogResponse: + """ + Test Class for LogResponse + """ + + def test_log_response_serialization(self): + """ + Test serialization/deserialization for LogResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + 
runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = 
True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + log_response_output_model = {} # LogResponseOutput + log_response_output_model['generic'] = [runtime_response_generic_model] + log_response_output_model['intents'] = [runtime_intent_model] + log_response_output_model['entities'] = [runtime_entity_model] + log_response_output_model['actions'] = [dialog_node_action_model] + log_response_output_model['debug'] = message_output_debug_model + log_response_output_model['user_defined'] = {'anyKey': 'anyValue'} + log_response_output_model['spelling'] = message_output_spelling_model + log_response_output_model['llm_metadata'] = [message_output_llm_metadata_model] + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + message_context_global_model = {} # MessageContextGlobal + message_context_global_model['system'] = message_context_global_system_model + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + message_context_action_skill_model = {} # MessageContextActionSkill + message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + message_context_skills_model = {} # MessageContextSkills + message_context_skills_model['main skill'] = message_context_dialog_skill_model + message_context_skills_model['actions skill'] = message_context_action_skill_model + + message_context_model = {} # MessageContext + message_context_model['global'] = message_context_global_model + message_context_model['skills'] = message_context_skills_model + message_context_model['integrations'] = {'anyKey': 'anyValue'} + + # Construct a json representation of a LogResponse model + log_response_model_json = {} + log_response_model_json['output'] = log_response_output_model + log_response_model_json['context'] = message_context_model + log_response_model_json['user_id'] = 'testString' + + # Construct a model instance of LogResponse 
by calling from_dict on the json representation + log_response_model = LogResponse.from_dict(log_response_model_json) + assert log_response_model != False + + # Construct a model instance of LogResponse by calling from_dict on the json representation + log_response_model_dict = LogResponse.from_dict(log_response_model_json).__dict__ + log_response_model2 = LogResponse(**log_response_model_dict) + + # Verify the model instances are equivalent + assert log_response_model == log_response_model2 + + # Convert model instance back to dict and verify no loss of data + log_response_model_json2 = log_response_model.to_dict() + assert log_response_model_json2 == log_response_model_json + + +class TestModel_LogResponseOutput: + """ + Test Class for LogResponseOutput + """ + + def test_log_response_output_serialization(self): + """ + Test serialization/deserialization for LogResponseOutput + """ + + # Construct dict forms of any model objects needed in order to build this model. + + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + 
runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + 
turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + # Construct a json representation of a LogResponseOutput model + log_response_output_model_json = {} + log_response_output_model_json['generic'] = [runtime_response_generic_model] + log_response_output_model_json['intents'] = [runtime_intent_model] + log_response_output_model_json['entities'] = [runtime_entity_model] + log_response_output_model_json['actions'] = [dialog_node_action_model] + log_response_output_model_json['debug'] = message_output_debug_model + log_response_output_model_json['user_defined'] = {'anyKey': 'anyValue'} + log_response_output_model_json['spelling'] = message_output_spelling_model + log_response_output_model_json['llm_metadata'] = [message_output_llm_metadata_model] + + # Construct a model instance of LogResponseOutput by calling from_dict on the json representation + log_response_output_model = LogResponseOutput.from_dict(log_response_output_model_json) + assert log_response_output_model != False + + # Construct a model instance of LogResponseOutput by calling from_dict on the json representation + log_response_output_model_dict = LogResponseOutput.from_dict(log_response_output_model_json).__dict__ + log_response_output_model2 = LogResponseOutput(**log_response_output_model_dict) + + # Verify the model instances are equivalent + assert log_response_output_model == log_response_output_model2 + + # Convert model instance back to dict and verify no loss of data + log_response_output_model_json2 = log_response_output_model.to_dict() + assert log_response_output_model_json2 == log_response_output_model_json + + +class TestModel_MessageContext: + """ + Test Class for MessageContext + """ + + def test_message_context_serialization(self): + """ + Test serialization/deserialization for MessageContext + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + message_context_global_model = {} # MessageContextGlobal + message_context_global_model['system'] = message_context_global_system_model + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + message_context_action_skill_model = {} # MessageContextActionSkill + message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + message_context_skills_model = {} # MessageContextSkills + message_context_skills_model['main skill'] = message_context_dialog_skill_model + message_context_skills_model['actions skill'] = message_context_action_skill_model + + # Construct a json representation of a MessageContext model + message_context_model_json = {} + message_context_model_json['global'] = message_context_global_model + message_context_model_json['skills'] = message_context_skills_model + message_context_model_json['integrations'] = {'anyKey': 'anyValue'} + + # Construct a model instance of MessageContext by calling from_dict on the json representation + message_context_model = MessageContext.from_dict(message_context_model_json) + assert message_context_model != False + + # Construct a model instance of MessageContext by calling from_dict on the json representation + message_context_model_dict = MessageContext.from_dict(message_context_model_json).__dict__ + message_context_model2 = MessageContext(**message_context_model_dict) + + # Verify the model instances are equivalent + assert message_context_model == message_context_model2 + + # Convert model instance back to dict and verify no loss of data + message_context_model_json2 = message_context_model.to_dict() + assert message_context_model_json2 == message_context_model_json + + +class TestModel_MessageContextActionSkill: + """ + Test Class for MessageContextActionSkill + """ + + def test_message_context_action_skill_serialization(self): + """ + Test serialization/deserialization for MessageContextActionSkill + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + # Construct a json representation of a MessageContextActionSkill model + message_context_action_skill_model_json = {} + message_context_action_skill_model_json['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model_json['system'] = message_context_skill_system_model + message_context_action_skill_model_json['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model_json['skill_variables'] = {'anyKey': 'anyValue'} + + # Construct a model instance of MessageContextActionSkill by calling from_dict on the json representation + message_context_action_skill_model = MessageContextActionSkill.from_dict(message_context_action_skill_model_json) + assert message_context_action_skill_model != False + + # Construct a model instance of MessageContextActionSkill by calling from_dict on the json representation + message_context_action_skill_model_dict = MessageContextActionSkill.from_dict(message_context_action_skill_model_json).__dict__ + message_context_action_skill_model2 = MessageContextActionSkill(**message_context_action_skill_model_dict) + + # Verify the model instances are equivalent + assert message_context_action_skill_model == message_context_action_skill_model2 + + # Convert model instance back to dict and verify no loss of data + message_context_action_skill_model_json2 = message_context_action_skill_model.to_dict() + assert message_context_action_skill_model_json2 == message_context_action_skill_model_json + + +class TestModel_MessageContextDialogSkill: + """ + Test Class for MessageContextDialogSkill + """ + + def test_message_context_dialog_skill_serialization(self): + """ + Test serialization/deserialization for MessageContextDialogSkill + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + # Construct a json representation of a MessageContextDialogSkill model + message_context_dialog_skill_model_json = {} + message_context_dialog_skill_model_json['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model_json['system'] = message_context_skill_system_model + + # Construct a model instance of MessageContextDialogSkill by calling from_dict on the json representation + message_context_dialog_skill_model = MessageContextDialogSkill.from_dict(message_context_dialog_skill_model_json) + assert message_context_dialog_skill_model != False + + # Construct a model instance of MessageContextDialogSkill by calling from_dict on the json representation + message_context_dialog_skill_model_dict = MessageContextDialogSkill.from_dict(message_context_dialog_skill_model_json).__dict__ + message_context_dialog_skill_model2 = MessageContextDialogSkill(**message_context_dialog_skill_model_dict) + + # Verify the model instances are equivalent + assert message_context_dialog_skill_model == message_context_dialog_skill_model2 + + # Convert model instance back to dict and verify no loss of data + message_context_dialog_skill_model_json2 = message_context_dialog_skill_model.to_dict() + assert message_context_dialog_skill_model_json2 == message_context_dialog_skill_model_json + + +class TestModel_MessageContextGlobal: + """ + Test Class for MessageContextGlobal + """ + + def test_message_context_global_serialization(self): + """ + Test serialization/deserialization for MessageContextGlobal + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + # Construct a json representation of a MessageContextGlobal model + message_context_global_model_json = {} + message_context_global_model_json['system'] = message_context_global_system_model + + # Construct a model instance of MessageContextGlobal by calling from_dict on the json representation + message_context_global_model = MessageContextGlobal.from_dict(message_context_global_model_json) + assert message_context_global_model != False + + # Construct a model instance of MessageContextGlobal by calling from_dict on the json representation + message_context_global_model_dict = MessageContextGlobal.from_dict(message_context_global_model_json).__dict__ + message_context_global_model2 = MessageContextGlobal(**message_context_global_model_dict) + + # Verify the model instances are equivalent + assert message_context_global_model == message_context_global_model2 + + # Convert model instance back to dict and verify no loss of data + message_context_global_model_json2 = message_context_global_model.to_dict() + assert message_context_global_model_json2 == message_context_global_model_json + + +class TestModel_MessageContextGlobalSystem: + """ + Test Class for MessageContextGlobalSystem + """ + + def test_message_context_global_system_serialization(self): + """ + Test serialization/deserialization for MessageContextGlobalSystem + """ + + # Construct a json representation of a MessageContextGlobalSystem model + message_context_global_system_model_json = {} + message_context_global_system_model_json['timezone'] = 'testString' + message_context_global_system_model_json['user_id'] = 'testString' + message_context_global_system_model_json['turn_count'] = 38 + message_context_global_system_model_json['locale'] = 'en-us' + message_context_global_system_model_json['reference_time'] = 'testString' + message_context_global_system_model_json['session_start_time'] = 'testString' + message_context_global_system_model_json['state'] = 'testString' + message_context_global_system_model_json['skip_user_input'] = True + + # Construct a model instance of MessageContextGlobalSystem by calling from_dict on the json representation + message_context_global_system_model = MessageContextGlobalSystem.from_dict(message_context_global_system_model_json) + assert message_context_global_system_model != False + + # Construct a model instance of MessageContextGlobalSystem by calling from_dict on the json representation + message_context_global_system_model_dict = MessageContextGlobalSystem.from_dict(message_context_global_system_model_json).__dict__ + message_context_global_system_model2 = MessageContextGlobalSystem(**message_context_global_system_model_dict) + + # Verify the model instances are equivalent + assert message_context_global_system_model == message_context_global_system_model2 + + # Convert model instance back to dict and verify no loss of data + message_context_global_system_model_json2 = 
message_context_global_system_model.to_dict() + assert message_context_global_system_model_json2 == message_context_global_system_model_json + + +class TestModel_MessageContextSkillSystem: + """ + Test Class for MessageContextSkillSystem + """ + + def test_message_context_skill_system_serialization(self): + """ + Test serialization/deserialization for MessageContextSkillSystem + """ + + # Construct a json representation of a MessageContextSkillSystem model + message_context_skill_system_model_json = {} + message_context_skill_system_model_json['state'] = 'testString' + message_context_skill_system_model_json['foo'] = 'testString' + + # Construct a model instance of MessageContextSkillSystem by calling from_dict on the json representation + message_context_skill_system_model = MessageContextSkillSystem.from_dict(message_context_skill_system_model_json) + assert message_context_skill_system_model != False + + # Construct a model instance of MessageContextSkillSystem by calling from_dict on the json representation + message_context_skill_system_model_dict = MessageContextSkillSystem.from_dict(message_context_skill_system_model_json).__dict__ + message_context_skill_system_model2 = MessageContextSkillSystem(**message_context_skill_system_model_dict) + + # Verify the model instances are equivalent + assert message_context_skill_system_model == message_context_skill_system_model2 + + # Convert model instance back to dict and verify no loss of data + message_context_skill_system_model_json2 = message_context_skill_system_model.to_dict() + assert message_context_skill_system_model_json2 == message_context_skill_system_model_json + + # Test get_properties and set_properties methods. + message_context_skill_system_model.set_properties({}) + actual_dict = message_context_skill_system_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + message_context_skill_system_model.set_properties(expected_dict) + actual_dict = message_context_skill_system_model.get_properties() + assert actual_dict.keys() == expected_dict.keys() + + +class TestModel_MessageContextSkills: + """ + Test Class for MessageContextSkills + """ + + def test_message_context_skills_serialization(self): + """ + Test serialization/deserialization for MessageContextSkills + """ + + # Construct dict forms of any model objects needed in order to build this model. 
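# A minimal sketch of the dynamic-property behavior exercised by the
# MessageContextSkillSystem test above, assuming the usual generated-model semantics:
# set_properties() replaces the arbitrary keys (such as 'foo'), get_properties() returns
# only those keys, and to_dict() merges them with the typed 'state' field.
from ibm_watson.assistant_v2 import MessageContextSkillSystem

skill_system = MessageContextSkillSystem(state='testString')
skill_system.set_properties({'foo': 'bar'})
assert skill_system.get_properties() == {'foo': 'bar'}
assert skill_system.to_dict() == {'state': 'testString', 'foo': 'bar'}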
+ + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + message_context_action_skill_model = {} # MessageContextActionSkill + message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + # Construct a json representation of a MessageContextSkills model + message_context_skills_model_json = {} + message_context_skills_model_json['main skill'] = message_context_dialog_skill_model + message_context_skills_model_json['actions skill'] = message_context_action_skill_model + + # Construct a model instance of MessageContextSkills by calling from_dict on the json representation + message_context_skills_model = MessageContextSkills.from_dict(message_context_skills_model_json) + assert message_context_skills_model != False + + # Construct a model instance of MessageContextSkills by calling from_dict on the json representation + message_context_skills_model_dict = MessageContextSkills.from_dict(message_context_skills_model_json).__dict__ + message_context_skills_model2 = MessageContextSkills(**message_context_skills_model_dict) + + # Verify the model instances are equivalent + assert message_context_skills_model == message_context_skills_model2 + + # Convert model instance back to dict and verify no loss of data + message_context_skills_model_json2 = message_context_skills_model.to_dict() + assert message_context_skills_model_json2 == message_context_skills_model_json + + +class TestModel_MessageInput: + """ + Test Class for MessageInput + """ + + def test_message_input_serialization(self): + """ + Test serialization/deserialization for MessageInput + """ + + # Construct dict forms of any model objects needed in order to build this model. 
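# A minimal sketch showing that the dict form used by the MessageInput test below mirrors
# the model's keyword-argument constructor; only the two simplest fields are assumed here,
# and both construction routes should yield equal instances.
from ibm_watson.assistant_v2 import MessageInput

input_from_kwargs = MessageInput(message_type='text', text='testString')
input_from_dict = MessageInput.from_dict({'message_type': 'text', 'text': 'testString'})
assert input_from_kwargs == input_from_dict
assert input_from_dict.to_dict() == {'message_type': 'text', 'text': 'testString'}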
+ + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # 
MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + # Construct a json representation of a MessageInput model + message_input_model_json = {} + message_input_model_json['message_type'] = 'text' + message_input_model_json['text'] = 'testString' + message_input_model_json['intents'] = [runtime_intent_model] + message_input_model_json['entities'] = [runtime_entity_model] + message_input_model_json['suggestion_id'] = 'testString' + message_input_model_json['attachments'] = [message_input_attachment_model] + message_input_model_json['analytics'] = request_analytics_model + message_input_model_json['options'] = message_input_options_model + + # Construct a model instance of MessageInput by calling from_dict on the json representation + message_input_model = MessageInput.from_dict(message_input_model_json) + assert message_input_model != False + + # Construct a model instance of MessageInput by calling from_dict on the json representation + message_input_model_dict = MessageInput.from_dict(message_input_model_json).__dict__ + message_input_model2 = MessageInput(**message_input_model_dict) + + # Verify the model instances are equivalent + assert message_input_model == message_input_model2 + + # Convert model instance back to dict and verify no loss of data + message_input_model_json2 = message_input_model.to_dict() + assert message_input_model_json2 == message_input_model_json + + +class TestModel_MessageInputAttachment: + """ + Test Class for MessageInputAttachment + """ + + def test_message_input_attachment_serialization(self): + """ + Test serialization/deserialization for MessageInputAttachment + """ + + # Construct a json representation of a MessageInputAttachment model + message_input_attachment_model_json = {} + message_input_attachment_model_json['url'] = 'testString' + message_input_attachment_model_json['media_type'] = 'testString' + + # Construct a model instance of MessageInputAttachment by calling from_dict on the json representation + message_input_attachment_model = MessageInputAttachment.from_dict(message_input_attachment_model_json) + assert message_input_attachment_model != False + + # Construct a model instance of MessageInputAttachment by calling from_dict on the json representation + message_input_attachment_model_dict = MessageInputAttachment.from_dict(message_input_attachment_model_json).__dict__ + message_input_attachment_model2 = MessageInputAttachment(**message_input_attachment_model_dict) + + # Verify the model instances are equivalent + assert message_input_attachment_model == message_input_attachment_model2 + + # Convert model instance back to dict and verify no loss of data + message_input_attachment_model_json2 = message_input_attachment_model.to_dict() + assert message_input_attachment_model_json2 == message_input_attachment_model_json + + +class TestModel_MessageInputOptions: + """ + Test Class for MessageInputOptions + """ + + def test_message_input_options_serialization(self): + """ + Test serialization/deserialization for MessageInputOptions + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + # Construct a json representation of a MessageInputOptions model + message_input_options_model_json = {} + message_input_options_model_json['restart'] = False + message_input_options_model_json['alternate_intents'] = False + message_input_options_model_json['async_callout'] = False + message_input_options_model_json['spelling'] = message_input_options_spelling_model + message_input_options_model_json['debug'] = False + message_input_options_model_json['return_context'] = False + message_input_options_model_json['export'] = False + + # Construct a model instance of MessageInputOptions by calling from_dict on the json representation + message_input_options_model = MessageInputOptions.from_dict(message_input_options_model_json) + assert message_input_options_model != False + + # Construct a model instance of MessageInputOptions by calling from_dict on the json representation + message_input_options_model_dict = MessageInputOptions.from_dict(message_input_options_model_json).__dict__ + message_input_options_model2 = MessageInputOptions(**message_input_options_model_dict) + + # Verify the model instances are equivalent + assert message_input_options_model == message_input_options_model2 + + # Convert model instance back to dict and verify no loss of data + message_input_options_model_json2 = message_input_options_model.to_dict() + assert message_input_options_model_json2 == message_input_options_model_json + + +class TestModel_MessageInputOptionsSpelling: + """ + Test Class for MessageInputOptionsSpelling + """ + + def test_message_input_options_spelling_serialization(self): + """ + Test serialization/deserialization for MessageInputOptionsSpelling + """ + + # Construct a json representation of a MessageInputOptionsSpelling model + message_input_options_spelling_model_json = {} + message_input_options_spelling_model_json['suggestions'] = True + message_input_options_spelling_model_json['auto_correct'] = True + + # Construct a model instance of MessageInputOptionsSpelling by calling from_dict on the json representation + message_input_options_spelling_model = MessageInputOptionsSpelling.from_dict(message_input_options_spelling_model_json) + assert message_input_options_spelling_model != False + + # Construct a model instance of MessageInputOptionsSpelling by calling from_dict on the json representation + message_input_options_spelling_model_dict = MessageInputOptionsSpelling.from_dict(message_input_options_spelling_model_json).__dict__ + message_input_options_spelling_model2 = MessageInputOptionsSpelling(**message_input_options_spelling_model_dict) + + # Verify the model instances are equivalent + assert message_input_options_spelling_model == message_input_options_spelling_model2 + + # Convert model instance back to dict and verify no loss of data + message_input_options_spelling_model_json2 = message_input_options_spelling_model.to_dict() + assert message_input_options_spelling_model_json2 == message_input_options_spelling_model_json + + +class TestModel_MessageOutput: + """ + Test Class for MessageOutput + """ + + def test_message_output_serialization(self): + """ + Test serialization/deserialization for MessageOutput + """ + + # Construct dict forms of any model objects needed in order to build this model. 
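# A minimal sketch of the discriminator dispatch behind the 'generic' entries built below:
# RuntimeResponseGeneric.from_dict() routes on 'response_type' to a concrete subclass.
# The plain 'text' response type is assumed here for brevity.
from ibm_watson.assistant_v2 import (
    RuntimeResponseGeneric,
    RuntimeResponseGenericRuntimeResponseTypeText,
)

generic = RuntimeResponseGeneric.from_dict({'response_type': 'text', 'text': 'testString'})
assert isinstance(generic, RuntimeResponseGenericRuntimeResponseTypeText)
assert generic.to_dict() == {'response_type': 'text', 'text': 'testString'}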
+ + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + 
runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = 
True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + # Construct a json representation of a MessageOutput model + message_output_model_json = {} + message_output_model_json['generic'] = [runtime_response_generic_model] + message_output_model_json['intents'] = [runtime_intent_model] + message_output_model_json['entities'] = [runtime_entity_model] + message_output_model_json['actions'] = [dialog_node_action_model] + message_output_model_json['debug'] = message_output_debug_model + message_output_model_json['user_defined'] = {'anyKey': 'anyValue'} + message_output_model_json['spelling'] = message_output_spelling_model + message_output_model_json['llm_metadata'] = [message_output_llm_metadata_model] + + # Construct a model instance of MessageOutput by calling from_dict on the json representation + message_output_model = MessageOutput.from_dict(message_output_model_json) + assert message_output_model != False + + # Construct a model instance of MessageOutput by calling from_dict on the json representation + message_output_model_dict = MessageOutput.from_dict(message_output_model_json).__dict__ + message_output_model2 = MessageOutput(**message_output_model_dict) + + # Verify the model instances are equivalent + assert message_output_model == message_output_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_model_json2 = message_output_model.to_dict() + assert message_output_model_json2 == message_output_model_json + + +class TestModel_MessageOutputDebug: + """ + Test Class for MessageOutputDebug + """ + + def test_message_output_debug_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebug + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + # Construct a json representation of a MessageOutputDebug model + message_output_debug_model_json = {} + message_output_debug_model_json['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model_json['log_messages'] = [dialog_log_message_model] + message_output_debug_model_json['branch_exited'] = True + message_output_debug_model_json['branch_exited_reason'] = 'completed' + message_output_debug_model_json['turn_events'] = [message_output_debug_turn_event_model] + + # Construct a model instance of MessageOutputDebug by calling from_dict on the json representation + message_output_debug_model = MessageOutputDebug.from_dict(message_output_debug_model_json) + assert message_output_debug_model != False + + # Construct a model instance of MessageOutputDebug by calling from_dict on the json representation + message_output_debug_model_dict = MessageOutputDebug.from_dict(message_output_debug_model_json).__dict__ + message_output_debug_model2 = MessageOutputDebug(**message_output_debug_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_model == message_output_debug_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_model_json2 = message_output_debug_model.to_dict() + assert message_output_debug_model_json2 == message_output_debug_model_json + + +class TestModel_MessageOutputLLMMetadata: + """ + Test Class for MessageOutputLLMMetadata + """ + + def test_message_output_llm_metadata_serialization(self): + """ + Test serialization/deserialization for MessageOutputLLMMetadata + """ + + # Construct a json representation of a MessageOutputLLMMetadata model + message_output_llm_metadata_model_json = {} + message_output_llm_metadata_model_json['task'] = 'testString' + message_output_llm_metadata_model_json['model_id'] = 'testString' + + # Construct a model instance of MessageOutputLLMMetadata by calling from_dict on the json representation + message_output_llm_metadata_model = 
MessageOutputLLMMetadata.from_dict(message_output_llm_metadata_model_json) + assert message_output_llm_metadata_model != False + + # Construct a model instance of MessageOutputLLMMetadata by calling from_dict on the json representation + message_output_llm_metadata_model_dict = MessageOutputLLMMetadata.from_dict(message_output_llm_metadata_model_json).__dict__ + message_output_llm_metadata_model2 = MessageOutputLLMMetadata(**message_output_llm_metadata_model_dict) + + # Verify the model instances are equivalent + assert message_output_llm_metadata_model == message_output_llm_metadata_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_llm_metadata_model_json2 = message_output_llm_metadata_model.to_dict() + assert message_output_llm_metadata_model_json2 == message_output_llm_metadata_model_json + + +class TestModel_MessageOutputSpelling: + """ + Test Class for MessageOutputSpelling + """ + + def test_message_output_spelling_serialization(self): + """ + Test serialization/deserialization for MessageOutputSpelling + """ + + # Construct a json representation of a MessageOutputSpelling model + message_output_spelling_model_json = {} + message_output_spelling_model_json['text'] = 'testString' + message_output_spelling_model_json['original_text'] = 'testString' + message_output_spelling_model_json['suggested_text'] = 'testString' + + # Construct a model instance of MessageOutputSpelling by calling from_dict on the json representation + message_output_spelling_model = MessageOutputSpelling.from_dict(message_output_spelling_model_json) + assert message_output_spelling_model != False + + # Construct a model instance of MessageOutputSpelling by calling from_dict on the json representation + message_output_spelling_model_dict = MessageOutputSpelling.from_dict(message_output_spelling_model_json).__dict__ + message_output_spelling_model2 = MessageOutputSpelling(**message_output_spelling_model_dict) + + # Verify the model instances are equivalent + assert message_output_spelling_model == message_output_spelling_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_spelling_model_json2 = message_output_spelling_model.to_dict() + assert message_output_spelling_model_json2 == message_output_spelling_model_json + + +class TestModel_MessageStreamMetadata: + """ + Test Class for MessageStreamMetadata + """ + + def test_message_stream_metadata_serialization(self): + """ + Test serialization/deserialization for MessageStreamMetadata + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + metadata_model = {} # Metadata + metadata_model['id'] = 38 + + # Construct a json representation of a MessageStreamMetadata model + message_stream_metadata_model_json = {} + message_stream_metadata_model_json['streaming_metadata'] = metadata_model + + # Construct a model instance of MessageStreamMetadata by calling from_dict on the json representation + message_stream_metadata_model = MessageStreamMetadata.from_dict(message_stream_metadata_model_json) + assert message_stream_metadata_model != False + + # Construct a model instance of MessageStreamMetadata by calling from_dict on the json representation + message_stream_metadata_model_dict = MessageStreamMetadata.from_dict(message_stream_metadata_model_json).__dict__ + message_stream_metadata_model2 = MessageStreamMetadata(**message_stream_metadata_model_dict) + + # Verify the model instances are equivalent + assert message_stream_metadata_model == message_stream_metadata_model2 + + # Convert model instance back to dict and verify no loss of data + message_stream_metadata_model_json2 = message_stream_metadata_model.to_dict() + assert message_stream_metadata_model_json2 == message_stream_metadata_model_json + + +class TestModel_Metadata: + """ + Test Class for Metadata + """ + + def test_metadata_serialization(self): + """ + Test serialization/deserialization for Metadata + """ + + # Construct a json representation of a Metadata model + metadata_model_json = {} + metadata_model_json['id'] = 38 + + # Construct a model instance of Metadata by calling from_dict on the json representation + metadata_model = Metadata.from_dict(metadata_model_json) + assert metadata_model != False + + # Construct a model instance of Metadata by calling from_dict on the json representation + metadata_model_dict = Metadata.from_dict(metadata_model_json).__dict__ + metadata_model2 = Metadata(**metadata_model_dict) + + # Verify the model instances are equivalent + assert metadata_model == metadata_model2 + + # Convert model instance back to dict and verify no loss of data + metadata_model_json2 = metadata_model.to_dict() + assert metadata_model_json2 == metadata_model_json + + +class TestModel_MonitorAssistantReleaseImportArtifactResponse: + """ + Test Class for MonitorAssistantReleaseImportArtifactResponse + """ + + def test_monitor_assistant_release_import_artifact_response_serialization(self): + """ + Test serialization/deserialization for MonitorAssistantReleaseImportArtifactResponse + """ + + # Construct a json representation of a MonitorAssistantReleaseImportArtifactResponse model + monitor_assistant_release_import_artifact_response_model_json = {} + monitor_assistant_release_import_artifact_response_model_json['skill_impact_in_draft'] = ['action'] + + # Construct a model instance of MonitorAssistantReleaseImportArtifactResponse by calling from_dict on the json representation + monitor_assistant_release_import_artifact_response_model = MonitorAssistantReleaseImportArtifactResponse.from_dict(monitor_assistant_release_import_artifact_response_model_json) + assert monitor_assistant_release_import_artifact_response_model != False + + # Construct a model instance of MonitorAssistantReleaseImportArtifactResponse by calling from_dict on the json representation + monitor_assistant_release_import_artifact_response_model_dict = MonitorAssistantReleaseImportArtifactResponse.from_dict(monitor_assistant_release_import_artifact_response_model_json).__dict__ + monitor_assistant_release_import_artifact_response_model2 = 
MonitorAssistantReleaseImportArtifactResponse(**monitor_assistant_release_import_artifact_response_model_dict) + + # Verify the model instances are equivalent + assert monitor_assistant_release_import_artifact_response_model == monitor_assistant_release_import_artifact_response_model2 + + # Convert model instance back to dict and verify no loss of data + monitor_assistant_release_import_artifact_response_model_json2 = monitor_assistant_release_import_artifact_response_model.to_dict() + assert monitor_assistant_release_import_artifact_response_model_json2 == monitor_assistant_release_import_artifact_response_model_json + + +class TestModel_Pagination: + """ + Test Class for Pagination + """ + + def test_pagination_serialization(self): + """ + Test serialization/deserialization for Pagination + """ + + # Construct a json representation of a Pagination model + pagination_model_json = {} + pagination_model_json['refresh_url'] = 'testString' + pagination_model_json['next_url'] = 'testString' + pagination_model_json['total'] = 38 + pagination_model_json['matched'] = 38 + pagination_model_json['refresh_cursor'] = 'testString' + pagination_model_json['next_cursor'] = 'testString' + + # Construct a model instance of Pagination by calling from_dict on the json representation + pagination_model = Pagination.from_dict(pagination_model_json) + assert pagination_model != False + + # Construct a model instance of Pagination by calling from_dict on the json representation + pagination_model_dict = Pagination.from_dict(pagination_model_json).__dict__ + pagination_model2 = Pagination(**pagination_model_dict) + + # Verify the model instances are equivalent + assert pagination_model == pagination_model2 + + # Convert model instance back to dict and verify no loss of data + pagination_model_json2 = pagination_model.to_dict() + assert pagination_model_json2 == pagination_model_json + + +class TestModel_PartialItem: + """ + Test Class for PartialItem + """ + + def test_partial_item_serialization(self): + """ + Test serialization/deserialization for PartialItem + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + metadata_model = {} # Metadata + metadata_model['id'] = 38 + + # Construct a json representation of a PartialItem model + partial_item_model_json = {} + partial_item_model_json['response_type'] = 'testString' + partial_item_model_json['text'] = 'testString' + partial_item_model_json['streaming_metadata'] = metadata_model + + # Construct a model instance of PartialItem by calling from_dict on the json representation + partial_item_model = PartialItem.from_dict(partial_item_model_json) + assert partial_item_model != False + + # Construct a model instance of PartialItem by calling from_dict on the json representation + partial_item_model_dict = PartialItem.from_dict(partial_item_model_json).__dict__ + partial_item_model2 = PartialItem(**partial_item_model_dict) + + # Verify the model instances are equivalent + assert partial_item_model == partial_item_model2 + + # Convert model instance back to dict and verify no loss of data + partial_item_model_json2 = partial_item_model.to_dict() + assert partial_item_model_json2 == partial_item_model_json + + +class TestModel_ProviderAuthenticationOAuth2: + """ + Test Class for ProviderAuthenticationOAuth2 + """ + + def test_provider_authentication_o_auth2_serialization(self): + """ + Test serialization/deserialization for ProviderAuthenticationOAuth2 + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + # Construct a json representation of a ProviderAuthenticationOAuth2 model + provider_authentication_o_auth2_model_json = {} + provider_authentication_o_auth2_model_json['preferred_flow'] = 'password' + provider_authentication_o_auth2_model_json['flows'] = provider_authentication_o_auth2_flows_model + + # Construct a model instance of ProviderAuthenticationOAuth2 by calling from_dict on the json representation + provider_authentication_o_auth2_model = ProviderAuthenticationOAuth2.from_dict(provider_authentication_o_auth2_model_json) + assert provider_authentication_o_auth2_model != False + + # Construct a model instance of ProviderAuthenticationOAuth2 by calling from_dict on the json representation + provider_authentication_o_auth2_model_dict = ProviderAuthenticationOAuth2.from_dict(provider_authentication_o_auth2_model_json).__dict__ + provider_authentication_o_auth2_model2 = ProviderAuthenticationOAuth2(**provider_authentication_o_auth2_model_dict) + + # Verify the model instances are equivalent + assert provider_authentication_o_auth2_model == provider_authentication_o_auth2_model2 + + # Convert model instance back to dict and verify no loss of data + provider_authentication_o_auth2_model_json2 = 
provider_authentication_o_auth2_model.to_dict() + assert provider_authentication_o_auth2_model_json2 == provider_authentication_o_auth2_model_json + + +class TestModel_ProviderAuthenticationOAuth2PasswordUsername: + """ + Test Class for ProviderAuthenticationOAuth2PasswordUsername + """ + + def test_provider_authentication_o_auth2_password_username_serialization(self): + """ + Test serialization/deserialization for ProviderAuthenticationOAuth2PasswordUsername + """ + + # Construct a json representation of a ProviderAuthenticationOAuth2PasswordUsername model + provider_authentication_o_auth2_password_username_model_json = {} + provider_authentication_o_auth2_password_username_model_json['type'] = 'value' + provider_authentication_o_auth2_password_username_model_json['value'] = 'testString' + + # Construct a model instance of ProviderAuthenticationOAuth2PasswordUsername by calling from_dict on the json representation + provider_authentication_o_auth2_password_username_model = ProviderAuthenticationOAuth2PasswordUsername.from_dict(provider_authentication_o_auth2_password_username_model_json) + assert provider_authentication_o_auth2_password_username_model != False + + # Construct a model instance of ProviderAuthenticationOAuth2PasswordUsername by calling from_dict on the json representation + provider_authentication_o_auth2_password_username_model_dict = ProviderAuthenticationOAuth2PasswordUsername.from_dict(provider_authentication_o_auth2_password_username_model_json).__dict__ + provider_authentication_o_auth2_password_username_model2 = ProviderAuthenticationOAuth2PasswordUsername(**provider_authentication_o_auth2_password_username_model_dict) + + # Verify the model instances are equivalent + assert provider_authentication_o_auth2_password_username_model == provider_authentication_o_auth2_password_username_model2 + + # Convert model instance back to dict and verify no loss of data + provider_authentication_o_auth2_password_username_model_json2 = provider_authentication_o_auth2_password_username_model.to_dict() + assert provider_authentication_o_auth2_password_username_model_json2 == provider_authentication_o_auth2_password_username_model_json + + +class TestModel_ProviderAuthenticationTypeAndValue: + """ + Test Class for ProviderAuthenticationTypeAndValue + """ + + def test_provider_authentication_type_and_value_serialization(self): + """ + Test serialization/deserialization for ProviderAuthenticationTypeAndValue + """ + + # Construct a json representation of a ProviderAuthenticationTypeAndValue model + provider_authentication_type_and_value_model_json = {} + provider_authentication_type_and_value_model_json['type'] = 'value' + provider_authentication_type_and_value_model_json['value'] = 'testString' + + # Construct a model instance of ProviderAuthenticationTypeAndValue by calling from_dict on the json representation + provider_authentication_type_and_value_model = ProviderAuthenticationTypeAndValue.from_dict(provider_authentication_type_and_value_model_json) + assert provider_authentication_type_and_value_model != False + + # Construct a model instance of ProviderAuthenticationTypeAndValue by calling from_dict on the json representation + provider_authentication_type_and_value_model_dict = ProviderAuthenticationTypeAndValue.from_dict(provider_authentication_type_and_value_model_json).__dict__ + provider_authentication_type_and_value_model2 = ProviderAuthenticationTypeAndValue(**provider_authentication_type_and_value_model_dict) + + # Verify the model instances are equivalent + assert 
provider_authentication_type_and_value_model == provider_authentication_type_and_value_model2 + + # Convert model instance back to dict and verify no loss of data + provider_authentication_type_and_value_model_json2 = provider_authentication_type_and_value_model.to_dict() + assert provider_authentication_type_and_value_model_json2 == provider_authentication_type_and_value_model_json + + +class TestModel_ProviderCollection: + """ + Test Class for ProviderCollection + """ + + def test_provider_collection_serialization(self): + """ + Test serialization/deserialization for ProviderCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_response_specification_servers_item_model = {} # ProviderResponseSpecificationServersItem + provider_response_specification_servers_item_model['url'] = 'testString' + + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + provider_response_specification_components_security_schemes_basic_model = {} # ProviderResponseSpecificationComponentsSecuritySchemesBasic + provider_response_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2 + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + provider_response_specification_components_security_schemes_model = {} # ProviderResponseSpecificationComponentsSecuritySchemes + provider_response_specification_components_security_schemes_model['authentication_method'] = 'basic' + provider_response_specification_components_security_schemes_model['basic'] = provider_response_specification_components_security_schemes_basic_model + provider_response_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model + + provider_response_specification_components_model = {} # ProviderResponseSpecificationComponents + provider_response_specification_components_model['securitySchemes'] = provider_response_specification_components_security_schemes_model + + provider_response_specification_model = {} # ProviderResponseSpecification + provider_response_specification_model['servers'] = [provider_response_specification_servers_item_model] + provider_response_specification_model['components'] = provider_response_specification_components_model + + 
provider_response_model = {} # ProviderResponse + provider_response_model['provider_id'] = 'testString' + provider_response_model['specification'] = provider_response_specification_model + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a ProviderCollection model + provider_collection_model_json = {} + provider_collection_model_json['conversational_skill_providers'] = [provider_response_model] + provider_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of ProviderCollection by calling from_dict on the json representation + provider_collection_model = ProviderCollection.from_dict(provider_collection_model_json) + assert provider_collection_model != False + + # Construct a model instance of ProviderCollection by calling from_dict on the json representation + provider_collection_model_dict = ProviderCollection.from_dict(provider_collection_model_json).__dict__ + provider_collection_model2 = ProviderCollection(**provider_collection_model_dict) + + # Verify the model instances are equivalent + assert provider_collection_model == provider_collection_model2 + + # Convert model instance back to dict and verify no loss of data + provider_collection_model_json2 = provider_collection_model.to_dict() + assert provider_collection_model_json2 == provider_collection_model_json + + +class TestModel_ProviderPrivate: + """ + Test Class for ProviderPrivate + """ + + def test_provider_private_serialization(self): + """ + Test serialization/deserialization for ProviderPrivate + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + provider_private_authentication_model = {} # ProviderPrivateAuthenticationBearerFlow + provider_private_authentication_model['token'] = provider_authentication_type_and_value_model + + # Construct a json representation of a ProviderPrivate model + provider_private_model_json = {} + provider_private_model_json['authentication'] = provider_private_authentication_model + + # Construct a model instance of ProviderPrivate by calling from_dict on the json representation + provider_private_model = ProviderPrivate.from_dict(provider_private_model_json) + assert provider_private_model != False + + # Construct a model instance of ProviderPrivate by calling from_dict on the json representation + provider_private_model_dict = ProviderPrivate.from_dict(provider_private_model_json).__dict__ + provider_private_model2 = ProviderPrivate(**provider_private_model_dict) + + # Verify the model instances are equivalent + assert provider_private_model == provider_private_model2 + + # Convert model instance back to dict and verify no loss of data + provider_private_model_json2 = provider_private_model.to_dict() + assert provider_private_model_json2 == provider_private_model_json + + +class TestModel_ProviderPrivateAuthenticationOAuth2PasswordPassword: + """ + Test Class for ProviderPrivateAuthenticationOAuth2PasswordPassword + """ + + def test_provider_private_authentication_o_auth2_password_password_serialization(self): + """ + Test serialization/deserialization for ProviderPrivateAuthenticationOAuth2PasswordPassword + """ + + # Construct a json representation of a ProviderPrivateAuthenticationOAuth2PasswordPassword model + provider_private_authentication_o_auth2_password_password_model_json = {} + provider_private_authentication_o_auth2_password_password_model_json['type'] = 'value' + provider_private_authentication_o_auth2_password_password_model_json['value'] = 'testString' + + # Construct a model instance of ProviderPrivateAuthenticationOAuth2PasswordPassword by calling from_dict on the json representation + provider_private_authentication_o_auth2_password_password_model = ProviderPrivateAuthenticationOAuth2PasswordPassword.from_dict(provider_private_authentication_o_auth2_password_password_model_json) + assert provider_private_authentication_o_auth2_password_password_model != False + + # Construct a model instance of ProviderPrivateAuthenticationOAuth2PasswordPassword by calling from_dict on the json representation + provider_private_authentication_o_auth2_password_password_model_dict = ProviderPrivateAuthenticationOAuth2PasswordPassword.from_dict(provider_private_authentication_o_auth2_password_password_model_json).__dict__ + provider_private_authentication_o_auth2_password_password_model2 = ProviderPrivateAuthenticationOAuth2PasswordPassword(**provider_private_authentication_o_auth2_password_password_model_dict) + + # Verify the model instances are equivalent + assert provider_private_authentication_o_auth2_password_password_model == provider_private_authentication_o_auth2_password_password_model2 + + # Convert model instance back to dict and verify no loss of data + provider_private_authentication_o_auth2_password_password_model_json2 = provider_private_authentication_o_auth2_password_password_model.to_dict() + assert 
provider_private_authentication_o_auth2_password_password_model_json2 == provider_private_authentication_o_auth2_password_password_model_json + + +class TestModel_ProviderResponse: + """ + Test Class for ProviderResponse + """ + + def test_provider_response_serialization(self): + """ + Test serialization/deserialization for ProviderResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_response_specification_servers_item_model = {} # ProviderResponseSpecificationServersItem + provider_response_specification_servers_item_model['url'] = 'testString' + + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + provider_response_specification_components_security_schemes_basic_model = {} # ProviderResponseSpecificationComponentsSecuritySchemesBasic + provider_response_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2 + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + provider_response_specification_components_security_schemes_model = {} # ProviderResponseSpecificationComponentsSecuritySchemes + provider_response_specification_components_security_schemes_model['authentication_method'] = 'basic' + provider_response_specification_components_security_schemes_model['basic'] = provider_response_specification_components_security_schemes_basic_model + provider_response_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model + + provider_response_specification_components_model = {} # ProviderResponseSpecificationComponents + provider_response_specification_components_model['securitySchemes'] = provider_response_specification_components_security_schemes_model + + provider_response_specification_model = {} # ProviderResponseSpecification + provider_response_specification_model['servers'] = [provider_response_specification_servers_item_model] + provider_response_specification_model['components'] = provider_response_specification_components_model + + # Construct a json representation of a ProviderResponse model + provider_response_model_json = {} + provider_response_model_json['provider_id'] = 'testString' + provider_response_model_json['specification'] = provider_response_specification_model + + # Construct a 
model instance of ProviderResponse by calling from_dict on the json representation + provider_response_model = ProviderResponse.from_dict(provider_response_model_json) + assert provider_response_model != False + + # Construct a model instance of ProviderResponse by calling from_dict on the json representation + provider_response_model_dict = ProviderResponse.from_dict(provider_response_model_json).__dict__ + provider_response_model2 = ProviderResponse(**provider_response_model_dict) + + # Verify the model instances are equivalent + assert provider_response_model == provider_response_model2 + + # Convert model instance back to dict and verify no loss of data + provider_response_model_json2 = provider_response_model.to_dict() + assert provider_response_model_json2 == provider_response_model_json + + +class TestModel_ProviderResponseSpecification: + """ + Test Class for ProviderResponseSpecification + """ + + def test_provider_response_specification_serialization(self): + """ + Test serialization/deserialization for ProviderResponseSpecification + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_response_specification_servers_item_model = {} # ProviderResponseSpecificationServersItem + provider_response_specification_servers_item_model['url'] = 'testString' + + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + provider_response_specification_components_security_schemes_basic_model = {} # ProviderResponseSpecificationComponentsSecuritySchemesBasic + provider_response_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2 + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + provider_response_specification_components_security_schemes_model = {} # ProviderResponseSpecificationComponentsSecuritySchemes + provider_response_specification_components_security_schemes_model['authentication_method'] = 'basic' + provider_response_specification_components_security_schemes_model['basic'] = provider_response_specification_components_security_schemes_basic_model + provider_response_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model + + provider_response_specification_components_model = {} # 
ProviderResponseSpecificationComponents + provider_response_specification_components_model['securitySchemes'] = provider_response_specification_components_security_schemes_model + + # Construct a json representation of a ProviderResponseSpecification model + provider_response_specification_model_json = {} + provider_response_specification_model_json['servers'] = [provider_response_specification_servers_item_model] + provider_response_specification_model_json['components'] = provider_response_specification_components_model + + # Construct a model instance of ProviderResponseSpecification by calling from_dict on the json representation + provider_response_specification_model = ProviderResponseSpecification.from_dict(provider_response_specification_model_json) + assert provider_response_specification_model != False + + # Construct a model instance of ProviderResponseSpecification by calling from_dict on the json representation + provider_response_specification_model_dict = ProviderResponseSpecification.from_dict(provider_response_specification_model_json).__dict__ + provider_response_specification_model2 = ProviderResponseSpecification(**provider_response_specification_model_dict) + + # Verify the model instances are equivalent + assert provider_response_specification_model == provider_response_specification_model2 + + # Convert model instance back to dict and verify no loss of data + provider_response_specification_model_json2 = provider_response_specification_model.to_dict() + assert provider_response_specification_model_json2 == provider_response_specification_model_json + + +class TestModel_ProviderResponseSpecificationComponents: + """ + Test Class for ProviderResponseSpecificationComponents + """ + + def test_provider_response_specification_components_serialization(self): + """ + Test serialization/deserialization for ProviderResponseSpecificationComponents + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + provider_response_specification_components_security_schemes_basic_model = {} # ProviderResponseSpecificationComponentsSecuritySchemesBasic + provider_response_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2 + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + provider_response_specification_components_security_schemes_model = {} # ProviderResponseSpecificationComponentsSecuritySchemes + provider_response_specification_components_security_schemes_model['authentication_method'] = 'basic' + provider_response_specification_components_security_schemes_model['basic'] = provider_response_specification_components_security_schemes_basic_model + provider_response_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model + + # Construct a json representation of a ProviderResponseSpecificationComponents model + provider_response_specification_components_model_json = {} + provider_response_specification_components_model_json['securitySchemes'] = provider_response_specification_components_security_schemes_model + + # Construct a model instance of ProviderResponseSpecificationComponents by calling from_dict on the json representation + provider_response_specification_components_model = ProviderResponseSpecificationComponents.from_dict(provider_response_specification_components_model_json) + assert provider_response_specification_components_model != False + + # Construct a model instance of ProviderResponseSpecificationComponents by calling from_dict on the json representation + provider_response_specification_components_model_dict = ProviderResponseSpecificationComponents.from_dict(provider_response_specification_components_model_json).__dict__ + provider_response_specification_components_model2 = ProviderResponseSpecificationComponents(**provider_response_specification_components_model_dict) + + # Verify the model instances are equivalent + assert provider_response_specification_components_model == provider_response_specification_components_model2 + + # Convert model instance back to dict and verify no loss of data + provider_response_specification_components_model_json2 = 
provider_response_specification_components_model.to_dict() + assert provider_response_specification_components_model_json2 == provider_response_specification_components_model_json + + +class TestModel_ProviderResponseSpecificationComponentsSecuritySchemes: + """ + Test Class for ProviderResponseSpecificationComponentsSecuritySchemes + """ + + def test_provider_response_specification_components_security_schemes_serialization(self): + """ + Test serialization/deserialization for ProviderResponseSpecificationComponentsSecuritySchemes + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + provider_response_specification_components_security_schemes_basic_model = {} # ProviderResponseSpecificationComponentsSecuritySchemesBasic + provider_response_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2 + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + # Construct a json representation of a ProviderResponseSpecificationComponentsSecuritySchemes model + provider_response_specification_components_security_schemes_model_json = {} + provider_response_specification_components_security_schemes_model_json['authentication_method'] = 'basic' + provider_response_specification_components_security_schemes_model_json['basic'] = provider_response_specification_components_security_schemes_basic_model + provider_response_specification_components_security_schemes_model_json['oauth2'] = provider_authentication_o_auth2_model + + # Construct a model instance of ProviderResponseSpecificationComponentsSecuritySchemes by calling from_dict on the json representation + provider_response_specification_components_security_schemes_model = ProviderResponseSpecificationComponentsSecuritySchemes.from_dict(provider_response_specification_components_security_schemes_model_json) + assert provider_response_specification_components_security_schemes_model != False + + # Construct a model instance of ProviderResponseSpecificationComponentsSecuritySchemes by calling from_dict on the json representation + provider_response_specification_components_security_schemes_model_dict = 
ProviderResponseSpecificationComponentsSecuritySchemes.from_dict(provider_response_specification_components_security_schemes_model_json).__dict__ + provider_response_specification_components_security_schemes_model2 = ProviderResponseSpecificationComponentsSecuritySchemes(**provider_response_specification_components_security_schemes_model_dict) + + # Verify the model instances are equivalent + assert provider_response_specification_components_security_schemes_model == provider_response_specification_components_security_schemes_model2 + + # Convert model instance back to dict and verify no loss of data + provider_response_specification_components_security_schemes_model_json2 = provider_response_specification_components_security_schemes_model.to_dict() + assert provider_response_specification_components_security_schemes_model_json2 == provider_response_specification_components_security_schemes_model_json + + +class TestModel_ProviderResponseSpecificationComponentsSecuritySchemesBasic: + """ + Test Class for ProviderResponseSpecificationComponentsSecuritySchemesBasic + """ + + def test_provider_response_specification_components_security_schemes_basic_serialization(self): + """ + Test serialization/deserialization for ProviderResponseSpecificationComponentsSecuritySchemesBasic + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + # Construct a json representation of a ProviderResponseSpecificationComponentsSecuritySchemesBasic model + provider_response_specification_components_security_schemes_basic_model_json = {} + provider_response_specification_components_security_schemes_basic_model_json['username'] = provider_authentication_type_and_value_model + + # Construct a model instance of ProviderResponseSpecificationComponentsSecuritySchemesBasic by calling from_dict on the json representation + provider_response_specification_components_security_schemes_basic_model = ProviderResponseSpecificationComponentsSecuritySchemesBasic.from_dict(provider_response_specification_components_security_schemes_basic_model_json) + assert provider_response_specification_components_security_schemes_basic_model != False + + # Construct a model instance of ProviderResponseSpecificationComponentsSecuritySchemesBasic by calling from_dict on the json representation + provider_response_specification_components_security_schemes_basic_model_dict = ProviderResponseSpecificationComponentsSecuritySchemesBasic.from_dict(provider_response_specification_components_security_schemes_basic_model_json).__dict__ + provider_response_specification_components_security_schemes_basic_model2 = ProviderResponseSpecificationComponentsSecuritySchemesBasic(**provider_response_specification_components_security_schemes_basic_model_dict) + + # Verify the model instances are equivalent + assert provider_response_specification_components_security_schemes_basic_model == provider_response_specification_components_security_schemes_basic_model2 + + # Convert model instance back to dict and verify no loss of data + provider_response_specification_components_security_schemes_basic_model_json2 = provider_response_specification_components_security_schemes_basic_model.to_dict() + assert provider_response_specification_components_security_schemes_basic_model_json2 == 
provider_response_specification_components_security_schemes_basic_model_json + + +class TestModel_ProviderResponseSpecificationServersItem: + """ + Test Class for ProviderResponseSpecificationServersItem + """ + + def test_provider_response_specification_servers_item_serialization(self): + """ + Test serialization/deserialization for ProviderResponseSpecificationServersItem + """ + + # Construct a json representation of a ProviderResponseSpecificationServersItem model + provider_response_specification_servers_item_model_json = {} + provider_response_specification_servers_item_model_json['url'] = 'testString' + + # Construct a model instance of ProviderResponseSpecificationServersItem by calling from_dict on the json representation + provider_response_specification_servers_item_model = ProviderResponseSpecificationServersItem.from_dict(provider_response_specification_servers_item_model_json) + assert provider_response_specification_servers_item_model != False + + # Construct a model instance of ProviderResponseSpecificationServersItem by calling from_dict on the json representation + provider_response_specification_servers_item_model_dict = ProviderResponseSpecificationServersItem.from_dict(provider_response_specification_servers_item_model_json).__dict__ + provider_response_specification_servers_item_model2 = ProviderResponseSpecificationServersItem(**provider_response_specification_servers_item_model_dict) + + # Verify the model instances are equivalent + assert provider_response_specification_servers_item_model == provider_response_specification_servers_item_model2 + + # Convert model instance back to dict and verify no loss of data + provider_response_specification_servers_item_model_json2 = provider_response_specification_servers_item_model.to_dict() + assert provider_response_specification_servers_item_model_json2 == provider_response_specification_servers_item_model_json + + +class TestModel_ProviderSpecification: + """ + Test Class for ProviderSpecification + """ + + def test_provider_specification_serialization(self): + """ + Test serialization/deserialization for ProviderSpecification + """ + + # Construct dict forms of any model objects needed in order to build this model. 
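+ # ProviderSpecification reuses the same servers/components fixture shape as
+ # ProviderResponseSpecification above, but built from the ProviderSpecification* model classes
+ # (ProviderSpecificationServersItem, ProviderSpecificationComponentsSecuritySchemes, and so on).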
+ + provider_specification_servers_item_model = {} # ProviderSpecificationServersItem + provider_specification_servers_item_model['url'] = 'testString' + + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + provider_specification_components_security_schemes_basic_model = {} # ProviderSpecificationComponentsSecuritySchemesBasic + provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2 + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + provider_specification_components_security_schemes_model = {} # ProviderSpecificationComponentsSecuritySchemes + provider_specification_components_security_schemes_model['authentication_method'] = 'basic' + provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model + provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model + + provider_specification_components_model = {} # ProviderSpecificationComponents + provider_specification_components_model['securitySchemes'] = provider_specification_components_security_schemes_model + + # Construct a json representation of a ProviderSpecification model + provider_specification_model_json = {} + provider_specification_model_json['servers'] = [provider_specification_servers_item_model] + provider_specification_model_json['components'] = provider_specification_components_model + + # Construct a model instance of ProviderSpecification by calling from_dict on the json representation + provider_specification_model = ProviderSpecification.from_dict(provider_specification_model_json) + assert provider_specification_model != False + + # Construct a model instance of ProviderSpecification by calling from_dict on the json representation + provider_specification_model_dict = ProviderSpecification.from_dict(provider_specification_model_json).__dict__ + provider_specification_model2 = ProviderSpecification(**provider_specification_model_dict) + + # Verify the model instances are equivalent + assert provider_specification_model == provider_specification_model2 + + # Convert model instance back to dict and verify no loss of data + provider_specification_model_json2 = provider_specification_model.to_dict() + 
assert provider_specification_model_json2 == provider_specification_model_json + + +class TestModel_ProviderSpecificationComponents: + """ + Test Class for ProviderSpecificationComponents + """ + + def test_provider_specification_components_serialization(self): + """ + Test serialization/deserialization for ProviderSpecificationComponents + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + provider_specification_components_security_schemes_basic_model = {} # ProviderSpecificationComponentsSecuritySchemesBasic + provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2 + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + provider_specification_components_security_schemes_model = {} # ProviderSpecificationComponentsSecuritySchemes + provider_specification_components_security_schemes_model['authentication_method'] = 'basic' + provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model + provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model + + # Construct a json representation of a ProviderSpecificationComponents model + provider_specification_components_model_json = {} + provider_specification_components_model_json['securitySchemes'] = provider_specification_components_security_schemes_model + + # Construct a model instance of ProviderSpecificationComponents by calling from_dict on the json representation + provider_specification_components_model = ProviderSpecificationComponents.from_dict(provider_specification_components_model_json) + assert provider_specification_components_model != False + + # Construct a model instance of ProviderSpecificationComponents by calling from_dict on the json representation + provider_specification_components_model_dict = ProviderSpecificationComponents.from_dict(provider_specification_components_model_json).__dict__ + provider_specification_components_model2 = ProviderSpecificationComponents(**provider_specification_components_model_dict) + + # Verify the model instances are equivalent + assert provider_specification_components_model == 
provider_specification_components_model2 + + # Convert model instance back to dict and verify no loss of data + provider_specification_components_model_json2 = provider_specification_components_model.to_dict() + assert provider_specification_components_model_json2 == provider_specification_components_model_json + + +class TestModel_ProviderSpecificationComponentsSecuritySchemes: + """ + Test Class for ProviderSpecificationComponentsSecuritySchemes + """ + + def test_provider_specification_components_security_schemes_serialization(self): + """ + Test serialization/deserialization for ProviderSpecificationComponentsSecuritySchemes + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + provider_specification_components_security_schemes_basic_model = {} # ProviderSpecificationComponentsSecuritySchemesBasic + provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model + + provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password + provider_authentication_o_auth2_flows_model['token_url'] = 'testString' + provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_model['content_type'] = 'testString' + provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model + + provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2 + provider_authentication_o_auth2_model['preferred_flow'] = 'password' + provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model + + # Construct a json representation of a ProviderSpecificationComponentsSecuritySchemes model + provider_specification_components_security_schemes_model_json = {} + provider_specification_components_security_schemes_model_json['authentication_method'] = 'basic' + provider_specification_components_security_schemes_model_json['basic'] = provider_specification_components_security_schemes_basic_model + provider_specification_components_security_schemes_model_json['oauth2'] = provider_authentication_o_auth2_model + + # Construct a model instance of ProviderSpecificationComponentsSecuritySchemes by calling from_dict on the json representation + provider_specification_components_security_schemes_model = ProviderSpecificationComponentsSecuritySchemes.from_dict(provider_specification_components_security_schemes_model_json) + assert provider_specification_components_security_schemes_model != False + + # Construct a model instance of ProviderSpecificationComponentsSecuritySchemes by calling from_dict on the json representation + provider_specification_components_security_schemes_model_dict = 
ProviderSpecificationComponentsSecuritySchemes.from_dict(provider_specification_components_security_schemes_model_json).__dict__ + provider_specification_components_security_schemes_model2 = ProviderSpecificationComponentsSecuritySchemes(**provider_specification_components_security_schemes_model_dict) + + # Verify the model instances are equivalent + assert provider_specification_components_security_schemes_model == provider_specification_components_security_schemes_model2 + + # Convert model instance back to dict and verify no loss of data + provider_specification_components_security_schemes_model_json2 = provider_specification_components_security_schemes_model.to_dict() + assert provider_specification_components_security_schemes_model_json2 == provider_specification_components_security_schemes_model_json + + +class TestModel_ProviderSpecificationComponentsSecuritySchemesBasic: + """ + Test Class for ProviderSpecificationComponentsSecuritySchemesBasic + """ + + def test_provider_specification_components_security_schemes_basic_serialization(self): + """ + Test serialization/deserialization for ProviderSpecificationComponentsSecuritySchemesBasic + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + # Construct a json representation of a ProviderSpecificationComponentsSecuritySchemesBasic model + provider_specification_components_security_schemes_basic_model_json = {} + provider_specification_components_security_schemes_basic_model_json['username'] = provider_authentication_type_and_value_model + + # Construct a model instance of ProviderSpecificationComponentsSecuritySchemesBasic by calling from_dict on the json representation + provider_specification_components_security_schemes_basic_model = ProviderSpecificationComponentsSecuritySchemesBasic.from_dict(provider_specification_components_security_schemes_basic_model_json) + assert provider_specification_components_security_schemes_basic_model != False + + # Construct a model instance of ProviderSpecificationComponentsSecuritySchemesBasic by calling from_dict on the json representation + provider_specification_components_security_schemes_basic_model_dict = ProviderSpecificationComponentsSecuritySchemesBasic.from_dict(provider_specification_components_security_schemes_basic_model_json).__dict__ + provider_specification_components_security_schemes_basic_model2 = ProviderSpecificationComponentsSecuritySchemesBasic(**provider_specification_components_security_schemes_basic_model_dict) + + # Verify the model instances are equivalent + assert provider_specification_components_security_schemes_basic_model == provider_specification_components_security_schemes_basic_model2 + + # Convert model instance back to dict and verify no loss of data + provider_specification_components_security_schemes_basic_model_json2 = provider_specification_components_security_schemes_basic_model.to_dict() + assert provider_specification_components_security_schemes_basic_model_json2 == provider_specification_components_security_schemes_basic_model_json + + +class TestModel_ProviderSpecificationServersItem: + """ + Test Class for ProviderSpecificationServersItem + """ + + def test_provider_specification_servers_item_serialization(self): + """ + Test serialization/deserialization for 
ProviderSpecificationServersItem + """ + + # Construct a json representation of a ProviderSpecificationServersItem model + provider_specification_servers_item_model_json = {} + provider_specification_servers_item_model_json['url'] = 'testString' + + # Construct a model instance of ProviderSpecificationServersItem by calling from_dict on the json representation + provider_specification_servers_item_model = ProviderSpecificationServersItem.from_dict(provider_specification_servers_item_model_json) + assert provider_specification_servers_item_model != False + + # Construct a model instance of ProviderSpecificationServersItem by calling from_dict on the json representation + provider_specification_servers_item_model_dict = ProviderSpecificationServersItem.from_dict(provider_specification_servers_item_model_json).__dict__ + provider_specification_servers_item_model2 = ProviderSpecificationServersItem(**provider_specification_servers_item_model_dict) + + # Verify the model instances are equivalent + assert provider_specification_servers_item_model == provider_specification_servers_item_model2 + + # Convert model instance back to dict and verify no loss of data + provider_specification_servers_item_model_json2 = provider_specification_servers_item_model.to_dict() + assert provider_specification_servers_item_model_json2 == provider_specification_servers_item_model_json + + +class TestModel_Release: + """ + Test Class for Release + """ + + def test_release_serialization(self): + """ + Test serialization/deserialization for Release + """ + + # Construct a json representation of a Release model + release_model_json = {} + release_model_json['description'] = 'testString' + + # Construct a model instance of Release by calling from_dict on the json representation + release_model = Release.from_dict(release_model_json) + assert release_model != False + + # Construct a model instance of Release by calling from_dict on the json representation + release_model_dict = Release.from_dict(release_model_json).__dict__ + release_model2 = Release(**release_model_dict) + + # Verify the model instances are equivalent + assert release_model == release_model2 + + # Convert model instance back to dict and verify no loss of data + release_model_json2 = release_model.to_dict() + assert release_model_json2 == release_model_json + + +class TestModel_ReleaseCollection: + """ + Test Class for ReleaseCollection + """ + + def test_release_collection_serialization(self): + """ + Test serialization/deserialization for ReleaseCollection + """ + + # Construct dict forms of any model objects needed in order to build this model. 
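+ # ReleaseCollection pairs a list of Release dicts with the shared Pagination fixture
+ # (refresh/next URLs, totals, and cursors), matching the paging shape used by ProviderCollection above.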
+ + release_model = {} # Release + release_model['description'] = 'testString' + + pagination_model = {} # Pagination + pagination_model['refresh_url'] = 'testString' + pagination_model['next_url'] = 'testString' + pagination_model['total'] = 38 + pagination_model['matched'] = 38 + pagination_model['refresh_cursor'] = 'testString' + pagination_model['next_cursor'] = 'testString' + + # Construct a json representation of a ReleaseCollection model + release_collection_model_json = {} + release_collection_model_json['releases'] = [release_model] + release_collection_model_json['pagination'] = pagination_model + + # Construct a model instance of ReleaseCollection by calling from_dict on the json representation + release_collection_model = ReleaseCollection.from_dict(release_collection_model_json) + assert release_collection_model != False + + # Construct a model instance of ReleaseCollection by calling from_dict on the json representation + release_collection_model_dict = ReleaseCollection.from_dict(release_collection_model_json).__dict__ + release_collection_model2 = ReleaseCollection(**release_collection_model_dict) + + # Verify the model instances are equivalent + assert release_collection_model == release_collection_model2 + + # Convert model instance back to dict and verify no loss of data + release_collection_model_json2 = release_collection_model.to_dict() + assert release_collection_model_json2 == release_collection_model_json + + +class TestModel_ReleaseContent: + """ + Test Class for ReleaseContent + """ + + def test_release_content_serialization(self): + """ + Test serialization/deserialization for ReleaseContent + """ + + # Construct a json representation of a ReleaseContent model + release_content_model_json = {} + + # Construct a model instance of ReleaseContent by calling from_dict on the json representation + release_content_model = ReleaseContent.from_dict(release_content_model_json) + assert release_content_model != False + + # Construct a model instance of ReleaseContent by calling from_dict on the json representation + release_content_model_dict = ReleaseContent.from_dict(release_content_model_json).__dict__ + release_content_model2 = ReleaseContent(**release_content_model_dict) + + # Verify the model instances are equivalent + assert release_content_model == release_content_model2 + + # Convert model instance back to dict and verify no loss of data + release_content_model_json2 = release_content_model.to_dict() + assert release_content_model_json2 == release_content_model_json + + +class TestModel_ReleaseSkill: + """ + Test Class for ReleaseSkill + """ + + def test_release_skill_serialization(self): + """ + Test serialization/deserialization for ReleaseSkill + """ + + # Construct a json representation of a ReleaseSkill model + release_skill_model_json = {} + release_skill_model_json['skill_id'] = 'testString' + release_skill_model_json['type'] = 'dialog' + release_skill_model_json['snapshot'] = 'testString' + + # Construct a model instance of ReleaseSkill by calling from_dict on the json representation + release_skill_model = ReleaseSkill.from_dict(release_skill_model_json) + assert release_skill_model != False + + # Construct a model instance of ReleaseSkill by calling from_dict on the json representation + release_skill_model_dict = ReleaseSkill.from_dict(release_skill_model_json).__dict__ + release_skill_model2 = ReleaseSkill(**release_skill_model_dict) + + # Verify the model instances are equivalent + assert release_skill_model == release_skill_model2 + + # Convert 
model instance back to dict and verify no loss of data + release_skill_model_json2 = release_skill_model.to_dict() + assert release_skill_model_json2 == release_skill_model_json + + +class TestModel_RequestAnalytics: + """ + Test Class for RequestAnalytics + """ + + def test_request_analytics_serialization(self): + """ + Test serialization/deserialization for RequestAnalytics + """ + + # Construct a json representation of a RequestAnalytics model + request_analytics_model_json = {} + request_analytics_model_json['browser'] = 'testString' + request_analytics_model_json['device'] = 'testString' + request_analytics_model_json['pageUrl'] = 'testString' + + # Construct a model instance of RequestAnalytics by calling from_dict on the json representation + request_analytics_model = RequestAnalytics.from_dict(request_analytics_model_json) + assert request_analytics_model != False + + # Construct a model instance of RequestAnalytics by calling from_dict on the json representation + request_analytics_model_dict = RequestAnalytics.from_dict(request_analytics_model_json).__dict__ + request_analytics_model2 = RequestAnalytics(**request_analytics_model_dict) + + # Verify the model instances are equivalent + assert request_analytics_model == request_analytics_model2 + + # Convert model instance back to dict and verify no loss of data + request_analytics_model_json2 = request_analytics_model.to_dict() + assert request_analytics_model_json2 == request_analytics_model_json + + +class TestModel_ResponseGenericChannel: + """ + Test Class for ResponseGenericChannel + """ + + def test_response_generic_channel_serialization(self): + """ + Test serialization/deserialization for ResponseGenericChannel + """ + + # Construct a json representation of a ResponseGenericChannel model + response_generic_channel_model_json = {} + response_generic_channel_model_json['channel'] = 'testString' + + # Construct a model instance of ResponseGenericChannel by calling from_dict on the json representation + response_generic_channel_model = ResponseGenericChannel.from_dict(response_generic_channel_model_json) + assert response_generic_channel_model != False + + # Construct a model instance of ResponseGenericChannel by calling from_dict on the json representation + response_generic_channel_model_dict = ResponseGenericChannel.from_dict(response_generic_channel_model_json).__dict__ + response_generic_channel_model2 = ResponseGenericChannel(**response_generic_channel_model_dict) + + # Verify the model instances are equivalent + assert response_generic_channel_model == response_generic_channel_model2 + + # Convert model instance back to dict and verify no loss of data + response_generic_channel_model_json2 = response_generic_channel_model.to_dict() + assert response_generic_channel_model_json2 == response_generic_channel_model_json + + +class TestModel_ResponseGenericCitation: + """ + Test Class for ResponseGenericCitation + """ + + def test_response_generic_citation_serialization(self): + """ + Test serialization/deserialization for ResponseGenericCitation + """ + + # Construct dict forms of any model objects needed in order to build this model. 
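+ # The citation fixture nests ResponseGenericCitationRangesItem dicts (start/end offsets)
+ # under 'ranges'; that item model also gets its own dedicated round-trip test in the next class.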
+ + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + # Construct a json representation of a ResponseGenericCitation model + response_generic_citation_model_json = {} + response_generic_citation_model_json['title'] = 'testString' + response_generic_citation_model_json['text'] = 'testString' + response_generic_citation_model_json['body'] = 'testString' + response_generic_citation_model_json['search_result_index'] = 38 + response_generic_citation_model_json['ranges'] = [response_generic_citation_ranges_item_model] + + # Construct a model instance of ResponseGenericCitation by calling from_dict on the json representation + response_generic_citation_model = ResponseGenericCitation.from_dict(response_generic_citation_model_json) + assert response_generic_citation_model != False + + # Construct a model instance of ResponseGenericCitation by calling from_dict on the json representation + response_generic_citation_model_dict = ResponseGenericCitation.from_dict(response_generic_citation_model_json).__dict__ + response_generic_citation_model2 = ResponseGenericCitation(**response_generic_citation_model_dict) + + # Verify the model instances are equivalent + assert response_generic_citation_model == response_generic_citation_model2 + + # Convert model instance back to dict and verify no loss of data + response_generic_citation_model_json2 = response_generic_citation_model.to_dict() + assert response_generic_citation_model_json2 == response_generic_citation_model_json + + +class TestModel_ResponseGenericCitationRangesItem: + """ + Test Class for ResponseGenericCitationRangesItem + """ + + def test_response_generic_citation_ranges_item_serialization(self): + """ + Test serialization/deserialization for ResponseGenericCitationRangesItem + """ + + # Construct a json representation of a ResponseGenericCitationRangesItem model + response_generic_citation_ranges_item_model_json = {} + response_generic_citation_ranges_item_model_json['start'] = 38 + response_generic_citation_ranges_item_model_json['end'] = 38 + + # Construct a model instance of ResponseGenericCitationRangesItem by calling from_dict on the json representation + response_generic_citation_ranges_item_model = ResponseGenericCitationRangesItem.from_dict(response_generic_citation_ranges_item_model_json) + assert response_generic_citation_ranges_item_model != False + + # Construct a model instance of ResponseGenericCitationRangesItem by calling from_dict on the json representation + response_generic_citation_ranges_item_model_dict = ResponseGenericCitationRangesItem.from_dict(response_generic_citation_ranges_item_model_json).__dict__ + response_generic_citation_ranges_item_model2 = ResponseGenericCitationRangesItem(**response_generic_citation_ranges_item_model_dict) + + # Verify the model instances are equivalent + assert response_generic_citation_ranges_item_model == response_generic_citation_ranges_item_model2 + + # Convert model instance back to dict and verify no loss of data + response_generic_citation_ranges_item_model_json2 = response_generic_citation_ranges_item_model.to_dict() + assert response_generic_citation_ranges_item_model_json2 == response_generic_citation_ranges_item_model_json + + +class TestModel_ResponseGenericConfidenceScores: + """ + Test Class for ResponseGenericConfidenceScores + """ + + def test_response_generic_confidence_scores_serialization(self): + """ + Test 
serialization/deserialization for ResponseGenericConfidenceScores + """ + + # Construct a json representation of a ResponseGenericConfidenceScores model + response_generic_confidence_scores_model_json = {} + response_generic_confidence_scores_model_json['threshold'] = 72.5 + response_generic_confidence_scores_model_json['pre_gen'] = 72.5 + response_generic_confidence_scores_model_json['post_gen'] = 72.5 + response_generic_confidence_scores_model_json['extractiveness'] = 72.5 + + # Construct a model instance of ResponseGenericConfidenceScores by calling from_dict on the json representation + response_generic_confidence_scores_model = ResponseGenericConfidenceScores.from_dict(response_generic_confidence_scores_model_json) + assert response_generic_confidence_scores_model != False + + # Construct a model instance of ResponseGenericConfidenceScores by calling from_dict on the json representation + response_generic_confidence_scores_model_dict = ResponseGenericConfidenceScores.from_dict(response_generic_confidence_scores_model_json).__dict__ + response_generic_confidence_scores_model2 = ResponseGenericConfidenceScores(**response_generic_confidence_scores_model_dict) + + # Verify the model instances are equivalent + assert response_generic_confidence_scores_model == response_generic_confidence_scores_model2 + + # Convert model instance back to dict and verify no loss of data + response_generic_confidence_scores_model_json2 = response_generic_confidence_scores_model.to_dict() + assert response_generic_confidence_scores_model_json2 == response_generic_confidence_scores_model_json + + +class TestModel_RuntimeEntity: + """ + Test Class for RuntimeEntity + """ + + def test_runtime_entity_serialization(self): + """ + Test serialization/deserialization for RuntimeEntity + """ + + # Construct dict forms of any model objects needed in order to build this model. 
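+ # RuntimeEntity is the most deeply nested fixture here: it aggregates CaptureGroup,
+ # RuntimeEntityInterpretation, RuntimeEntityAlternative, and RuntimeEntityRole dicts,
+ # each of which is also exercised by its own test class further down.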
+ + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + # Construct a json representation of a RuntimeEntity model + runtime_entity_model_json = {} + runtime_entity_model_json['entity'] = 'testString' + runtime_entity_model_json['location'] = [38] + runtime_entity_model_json['value'] = 'testString' + runtime_entity_model_json['confidence'] = 72.5 + runtime_entity_model_json['groups'] = [capture_group_model] + runtime_entity_model_json['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model_json['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model_json['role'] = runtime_entity_role_model + runtime_entity_model_json['skill'] = 'testString' + + # Construct a model instance of RuntimeEntity by calling from_dict on the json representation + runtime_entity_model = RuntimeEntity.from_dict(runtime_entity_model_json) + assert runtime_entity_model != False + + # Construct a model instance of RuntimeEntity by calling from_dict on the json representation + runtime_entity_model_dict = RuntimeEntity.from_dict(runtime_entity_model_json).__dict__ + runtime_entity_model2 = RuntimeEntity(**runtime_entity_model_dict) + + # Verify the model instances are equivalent + assert runtime_entity_model == runtime_entity_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_entity_model_json2 = runtime_entity_model.to_dict() + assert 
runtime_entity_model_json2 == runtime_entity_model_json + + +class TestModel_RuntimeEntityAlternative: + """ + Test Class for RuntimeEntityAlternative + """ + + def test_runtime_entity_alternative_serialization(self): + """ + Test serialization/deserialization for RuntimeEntityAlternative + """ + + # Construct a json representation of a RuntimeEntityAlternative model + runtime_entity_alternative_model_json = {} + runtime_entity_alternative_model_json['value'] = 'testString' + runtime_entity_alternative_model_json['confidence'] = 72.5 + + # Construct a model instance of RuntimeEntityAlternative by calling from_dict on the json representation + runtime_entity_alternative_model = RuntimeEntityAlternative.from_dict(runtime_entity_alternative_model_json) + assert runtime_entity_alternative_model != False + + # Construct a model instance of RuntimeEntityAlternative by calling from_dict on the json representation + runtime_entity_alternative_model_dict = RuntimeEntityAlternative.from_dict(runtime_entity_alternative_model_json).__dict__ + runtime_entity_alternative_model2 = RuntimeEntityAlternative(**runtime_entity_alternative_model_dict) + + # Verify the model instances are equivalent + assert runtime_entity_alternative_model == runtime_entity_alternative_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_entity_alternative_model_json2 = runtime_entity_alternative_model.to_dict() + assert runtime_entity_alternative_model_json2 == runtime_entity_alternative_model_json + + +class TestModel_RuntimeEntityInterpretation: + """ + Test Class for RuntimeEntityInterpretation + """ + + def test_runtime_entity_interpretation_serialization(self): + """ + Test serialization/deserialization for RuntimeEntityInterpretation + """ + + # Construct a json representation of a RuntimeEntityInterpretation model + runtime_entity_interpretation_model_json = {} + runtime_entity_interpretation_model_json['calendar_type'] = 'testString' + runtime_entity_interpretation_model_json['datetime_link'] = 'testString' + runtime_entity_interpretation_model_json['festival'] = 'testString' + runtime_entity_interpretation_model_json['granularity'] = 'day' + runtime_entity_interpretation_model_json['range_link'] = 'testString' + runtime_entity_interpretation_model_json['range_modifier'] = 'testString' + runtime_entity_interpretation_model_json['relative_day'] = 72.5 + runtime_entity_interpretation_model_json['relative_month'] = 72.5 + runtime_entity_interpretation_model_json['relative_week'] = 72.5 + runtime_entity_interpretation_model_json['relative_weekend'] = 72.5 + runtime_entity_interpretation_model_json['relative_year'] = 72.5 + runtime_entity_interpretation_model_json['specific_day'] = 72.5 + runtime_entity_interpretation_model_json['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model_json['specific_month'] = 72.5 + runtime_entity_interpretation_model_json['specific_quarter'] = 72.5 + runtime_entity_interpretation_model_json['specific_year'] = 72.5 + runtime_entity_interpretation_model_json['numeric_value'] = 72.5 + runtime_entity_interpretation_model_json['subtype'] = 'testString' + runtime_entity_interpretation_model_json['part_of_day'] = 'testString' + runtime_entity_interpretation_model_json['relative_hour'] = 72.5 + runtime_entity_interpretation_model_json['relative_minute'] = 72.5 + runtime_entity_interpretation_model_json['relative_second'] = 72.5 + runtime_entity_interpretation_model_json['specific_hour'] = 72.5 + 
runtime_entity_interpretation_model_json['specific_minute'] = 72.5 + runtime_entity_interpretation_model_json['specific_second'] = 72.5 + runtime_entity_interpretation_model_json['timezone'] = 'testString' + + # Construct a model instance of RuntimeEntityInterpretation by calling from_dict on the json representation + runtime_entity_interpretation_model = RuntimeEntityInterpretation.from_dict(runtime_entity_interpretation_model_json) + assert runtime_entity_interpretation_model != False + + # Construct a model instance of RuntimeEntityInterpretation by calling from_dict on the json representation + runtime_entity_interpretation_model_dict = RuntimeEntityInterpretation.from_dict(runtime_entity_interpretation_model_json).__dict__ + runtime_entity_interpretation_model2 = RuntimeEntityInterpretation(**runtime_entity_interpretation_model_dict) + + # Verify the model instances are equivalent + assert runtime_entity_interpretation_model == runtime_entity_interpretation_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_entity_interpretation_model_json2 = runtime_entity_interpretation_model.to_dict() + assert runtime_entity_interpretation_model_json2 == runtime_entity_interpretation_model_json + + +class TestModel_RuntimeEntityRole: + """ + Test Class for RuntimeEntityRole + """ + + def test_runtime_entity_role_serialization(self): + """ + Test serialization/deserialization for RuntimeEntityRole + """ + + # Construct a json representation of a RuntimeEntityRole model + runtime_entity_role_model_json = {} + runtime_entity_role_model_json['type'] = 'date_from' + + # Construct a model instance of RuntimeEntityRole by calling from_dict on the json representation + runtime_entity_role_model = RuntimeEntityRole.from_dict(runtime_entity_role_model_json) + assert runtime_entity_role_model != False + + # Construct a model instance of RuntimeEntityRole by calling from_dict on the json representation + runtime_entity_role_model_dict = RuntimeEntityRole.from_dict(runtime_entity_role_model_json).__dict__ + runtime_entity_role_model2 = RuntimeEntityRole(**runtime_entity_role_model_dict) + + # Verify the model instances are equivalent + assert runtime_entity_role_model == runtime_entity_role_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_entity_role_model_json2 = runtime_entity_role_model.to_dict() + assert runtime_entity_role_model_json2 == runtime_entity_role_model_json + + +class TestModel_RuntimeIntent: + """ + Test Class for RuntimeIntent + """ + + def test_runtime_intent_serialization(self): + """ + Test serialization/deserialization for RuntimeIntent + """ + + # Construct a json representation of a RuntimeIntent model + runtime_intent_model_json = {} + runtime_intent_model_json['intent'] = 'testString' + runtime_intent_model_json['confidence'] = 72.5 + runtime_intent_model_json['skill'] = 'testString' + + # Construct a model instance of RuntimeIntent by calling from_dict on the json representation + runtime_intent_model = RuntimeIntent.from_dict(runtime_intent_model_json) + assert runtime_intent_model != False + + # Construct a model instance of RuntimeIntent by calling from_dict on the json representation + runtime_intent_model_dict = RuntimeIntent.from_dict(runtime_intent_model_json).__dict__ + runtime_intent_model2 = RuntimeIntent(**runtime_intent_model_dict) + + # Verify the model instances are equivalent + assert runtime_intent_model == runtime_intent_model2 + + # Convert model instance back to dict and verify no loss of data + 
runtime_intent_model_json2 = runtime_intent_model.to_dict() + assert runtime_intent_model_json2 == runtime_intent_model_json + + +class TestModel_SearchResult: + """ + Test Class for SearchResult + """ + + def test_search_result_serialization(self): + """ + Test serialization/deserialization for SearchResult + """ + + # Construct dict forms of any model objects needed in order to build this model. + + search_result_metadata_model = {} # SearchResultMetadata + search_result_metadata_model['confidence'] = 72.5 + search_result_metadata_model['score'] = 72.5 + + search_result_highlight_model = {} # SearchResultHighlight + search_result_highlight_model['body'] = ['testString'] + search_result_highlight_model['title'] = ['testString'] + search_result_highlight_model['url'] = ['testString'] + search_result_highlight_model['foo'] = ['testString'] + + search_result_answer_model = {} # SearchResultAnswer + search_result_answer_model['text'] = 'testString' + search_result_answer_model['confidence'] = 0 + + # Construct a json representation of a SearchResult model + search_result_model_json = {} + search_result_model_json['id'] = 'testString' + search_result_model_json['result_metadata'] = search_result_metadata_model + search_result_model_json['body'] = 'testString' + search_result_model_json['title'] = 'testString' + search_result_model_json['url'] = 'testString' + search_result_model_json['highlight'] = search_result_highlight_model + search_result_model_json['answers'] = [search_result_answer_model] + + # Construct a model instance of SearchResult by calling from_dict on the json representation + search_result_model = SearchResult.from_dict(search_result_model_json) + assert search_result_model != False + + # Construct a model instance of SearchResult by calling from_dict on the json representation + search_result_model_dict = SearchResult.from_dict(search_result_model_json).__dict__ + search_result_model2 = SearchResult(**search_result_model_dict) + + # Verify the model instances are equivalent + assert search_result_model == search_result_model2 + + # Convert model instance back to dict and verify no loss of data + search_result_model_json2 = search_result_model.to_dict() + assert search_result_model_json2 == search_result_model_json + + +class TestModel_SearchResultAnswer: + """ + Test Class for SearchResultAnswer + """ + + def test_search_result_answer_serialization(self): + """ + Test serialization/deserialization for SearchResultAnswer + """ + + # Construct a json representation of a SearchResultAnswer model + search_result_answer_model_json = {} + search_result_answer_model_json['text'] = 'testString' + search_result_answer_model_json['confidence'] = 0 + + # Construct a model instance of SearchResultAnswer by calling from_dict on the json representation + search_result_answer_model = SearchResultAnswer.from_dict(search_result_answer_model_json) + assert search_result_answer_model != False + + # Construct a model instance of SearchResultAnswer by calling from_dict on the json representation + search_result_answer_model_dict = SearchResultAnswer.from_dict(search_result_answer_model_json).__dict__ + search_result_answer_model2 = SearchResultAnswer(**search_result_answer_model_dict) + + # Verify the model instances are equivalent + assert search_result_answer_model == search_result_answer_model2 + + # Convert model instance back to dict and verify no loss of data + search_result_answer_model_json2 = search_result_answer_model.to_dict() + assert search_result_answer_model_json2 == 
search_result_answer_model_json + + +class TestModel_SearchResultHighlight: + """ + Test Class for SearchResultHighlight + """ + + def test_search_result_highlight_serialization(self): + """ + Test serialization/deserialization for SearchResultHighlight + """ + + # Construct a json representation of a SearchResultHighlight model + search_result_highlight_model_json = {} + search_result_highlight_model_json['body'] = ['testString'] + search_result_highlight_model_json['title'] = ['testString'] + search_result_highlight_model_json['url'] = ['testString'] + search_result_highlight_model_json['foo'] = ['testString'] + + # Construct a model instance of SearchResultHighlight by calling from_dict on the json representation + search_result_highlight_model = SearchResultHighlight.from_dict(search_result_highlight_model_json) + assert search_result_highlight_model != False + + # Construct a model instance of SearchResultHighlight by calling from_dict on the json representation + search_result_highlight_model_dict = SearchResultHighlight.from_dict(search_result_highlight_model_json).__dict__ + search_result_highlight_model2 = SearchResultHighlight(**search_result_highlight_model_dict) + + # Verify the model instances are equivalent + assert search_result_highlight_model == search_result_highlight_model2 + + # Convert model instance back to dict and verify no loss of data + search_result_highlight_model_json2 = search_result_highlight_model.to_dict() + assert search_result_highlight_model_json2 == search_result_highlight_model_json + + # Test get_properties and set_properties methods. + search_result_highlight_model.set_properties({}) + actual_dict = search_result_highlight_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': ['testString']} + search_result_highlight_model.set_properties(expected_dict) + actual_dict = search_result_highlight_model.get_properties() + assert actual_dict.keys() == expected_dict.keys() + + +class TestModel_SearchResultMetadata: + """ + Test Class for SearchResultMetadata + """ + + def test_search_result_metadata_serialization(self): + """ + Test serialization/deserialization for SearchResultMetadata + """ + + # Construct a json representation of a SearchResultMetadata model + search_result_metadata_model_json = {} + search_result_metadata_model_json['confidence'] = 72.5 + search_result_metadata_model_json['score'] = 72.5 + + # Construct a model instance of SearchResultMetadata by calling from_dict on the json representation + search_result_metadata_model = SearchResultMetadata.from_dict(search_result_metadata_model_json) + assert search_result_metadata_model != False + + # Construct a model instance of SearchResultMetadata by calling from_dict on the json representation + search_result_metadata_model_dict = SearchResultMetadata.from_dict(search_result_metadata_model_json).__dict__ + search_result_metadata_model2 = SearchResultMetadata(**search_result_metadata_model_dict) + + # Verify the model instances are equivalent + assert search_result_metadata_model == search_result_metadata_model2 + + # Convert model instance back to dict and verify no loss of data + search_result_metadata_model_json2 = search_result_metadata_model.to_dict() + assert search_result_metadata_model_json2 == search_result_metadata_model_json + + +class TestModel_SearchResults: + """ + Test Class for SearchResults + """ + + def test_search_results_serialization(self): + """ + Test serialization/deserialization for SearchResults + """ + + # Construct dict forms of any model 
objects needed in order to build this model. + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + # Construct a json representation of a SearchResults model + search_results_model_json = {} + search_results_model_json['result_metadata'] = search_results_result_metadata_model + search_results_model_json['id'] = 'testString' + search_results_model_json['title'] = 'testString' + search_results_model_json['body'] = 'testString' + + # Construct a model instance of SearchResults by calling from_dict on the json representation + search_results_model = SearchResults.from_dict(search_results_model_json) + assert search_results_model != False + + # Construct a model instance of SearchResults by calling from_dict on the json representation + search_results_model_dict = SearchResults.from_dict(search_results_model_json).__dict__ + search_results_model2 = SearchResults(**search_results_model_dict) + + # Verify the model instances are equivalent + assert search_results_model == search_results_model2 + + # Convert model instance back to dict and verify no loss of data + search_results_model_json2 = search_results_model.to_dict() + assert search_results_model_json2 == search_results_model_json + + +class TestModel_SearchResultsResultMetadata: + """ + Test Class for SearchResultsResultMetadata + """ + + def test_search_results_result_metadata_serialization(self): + """ + Test serialization/deserialization for SearchResultsResultMetadata + """ + + # Construct a json representation of a SearchResultsResultMetadata model + search_results_result_metadata_model_json = {} + search_results_result_metadata_model_json['document_retrieval_source'] = 'testString' + search_results_result_metadata_model_json['score'] = 38 + + # Construct a model instance of SearchResultsResultMetadata by calling from_dict on the json representation + search_results_result_metadata_model = SearchResultsResultMetadata.from_dict(search_results_result_metadata_model_json) + assert search_results_result_metadata_model != False + + # Construct a model instance of SearchResultsResultMetadata by calling from_dict on the json representation + search_results_result_metadata_model_dict = SearchResultsResultMetadata.from_dict(search_results_result_metadata_model_json).__dict__ + search_results_result_metadata_model2 = SearchResultsResultMetadata(**search_results_result_metadata_model_dict) + + # Verify the model instances are equivalent + assert search_results_result_metadata_model == search_results_result_metadata_model2 + + # Convert model instance back to dict and verify no loss of data + search_results_result_metadata_model_json2 = search_results_result_metadata_model.to_dict() + assert search_results_result_metadata_model_json2 == search_results_result_metadata_model_json + + +class TestModel_SearchSettings: + """ + Test Class for SearchSettings + """ + + def test_search_settings_serialization(self): + """ + Test serialization/deserialization for SearchSettings + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + search_settings_discovery_authentication_model = {} # SearchSettingsDiscoveryAuthentication + search_settings_discovery_authentication_model['basic'] = 'testString' + search_settings_discovery_authentication_model['bearer'] = 'testString' + + search_settings_discovery_model = {} # SearchSettingsDiscovery + search_settings_discovery_model['instance_id'] = 'testString' + search_settings_discovery_model['project_id'] = 'testString' + search_settings_discovery_model['url'] = 'testString' + search_settings_discovery_model['max_primary_results'] = 10000 + search_settings_discovery_model['max_total_results'] = 10000 + search_settings_discovery_model['confidence_threshold'] = 0.0 + search_settings_discovery_model['highlight'] = True + search_settings_discovery_model['find_answers'] = True + search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model + + search_settings_messages_model = {} # SearchSettingsMessages + search_settings_messages_model['success'] = 'testString' + search_settings_messages_model['error'] = 'testString' + search_settings_messages_model['no_result'] = 'testString' + + search_settings_schema_mapping_model = {} # SearchSettingsSchemaMapping + search_settings_schema_mapping_model['url'] = 'testString' + search_settings_schema_mapping_model['body'] = 'testString' + search_settings_schema_mapping_model['title'] = 'testString' + + search_settings_elastic_search_model = {} # SearchSettingsElasticSearch + search_settings_elastic_search_model['url'] = 'testString' + search_settings_elastic_search_model['port'] = 'testString' + search_settings_elastic_search_model['username'] = 'testString' + search_settings_elastic_search_model['password'] = 'testString' + search_settings_elastic_search_model['index'] = 'testString' + search_settings_elastic_search_model['filter'] = ['testString'] + search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'} + search_settings_elastic_search_model['managed_index'] = 'testString' + search_settings_elastic_search_model['apikey'] = 'testString' + + search_settings_conversational_search_response_length_model = {} # SearchSettingsConversationalSearchResponseLength + search_settings_conversational_search_response_length_model['option'] = 'moderate' + + search_settings_conversational_search_search_confidence_model = {} # SearchSettingsConversationalSearchSearchConfidence + search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often' + + search_settings_conversational_search_model = {} # SearchSettingsConversationalSearch + search_settings_conversational_search_model['enabled'] = True + search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model + search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model + + search_settings_server_side_search_model = {} # SearchSettingsServerSideSearch + search_settings_server_side_search_model['url'] = 'testString' + search_settings_server_side_search_model['port'] = 'testString' + search_settings_server_side_search_model['username'] = 'testString' + search_settings_server_side_search_model['password'] = 'testString' + search_settings_server_side_search_model['filter'] = 'testString' + search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'} + search_settings_server_side_search_model['apikey'] = 'testString' + search_settings_server_side_search_model['no_auth'] = True + 
search_settings_server_side_search_model['auth_type'] = 'basic' + + search_settings_client_side_search_model = {} # SearchSettingsClientSideSearch + search_settings_client_side_search_model['filter'] = 'testString' + search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'} + + # Construct a json representation of a SearchSettings model + search_settings_model_json = {} + search_settings_model_json['discovery'] = search_settings_discovery_model + search_settings_model_json['messages'] = search_settings_messages_model + search_settings_model_json['schema_mapping'] = search_settings_schema_mapping_model + search_settings_model_json['elastic_search'] = search_settings_elastic_search_model + search_settings_model_json['conversational_search'] = search_settings_conversational_search_model + search_settings_model_json['server_side_search'] = search_settings_server_side_search_model + search_settings_model_json['client_side_search'] = search_settings_client_side_search_model + + # Construct a model instance of SearchSettings by calling from_dict on the json representation + search_settings_model = SearchSettings.from_dict(search_settings_model_json) + assert search_settings_model != False + + # Construct a model instance of SearchSettings by calling from_dict on the json representation + search_settings_model_dict = SearchSettings.from_dict(search_settings_model_json).__dict__ + search_settings_model2 = SearchSettings(**search_settings_model_dict) + + # Verify the model instances are equivalent + assert search_settings_model == search_settings_model2 + + # Convert model instance back to dict and verify no loss of data + search_settings_model_json2 = search_settings_model.to_dict() + assert search_settings_model_json2 == search_settings_model_json + + +class TestModel_SearchSettingsClientSideSearch: + """ + Test Class for SearchSettingsClientSideSearch + """ + + def test_search_settings_client_side_search_serialization(self): + """ + Test serialization/deserialization for SearchSettingsClientSideSearch + """ + + # Construct a json representation of a SearchSettingsClientSideSearch model + search_settings_client_side_search_model_json = {} + search_settings_client_side_search_model_json['filter'] = 'testString' + search_settings_client_side_search_model_json['metadata'] = {'anyKey': 'anyValue'} + + # Construct a model instance of SearchSettingsClientSideSearch by calling from_dict on the json representation + search_settings_client_side_search_model = SearchSettingsClientSideSearch.from_dict(search_settings_client_side_search_model_json) + assert search_settings_client_side_search_model != False + + # Construct a model instance of SearchSettingsClientSideSearch by calling from_dict on the json representation + search_settings_client_side_search_model_dict = SearchSettingsClientSideSearch.from_dict(search_settings_client_side_search_model_json).__dict__ + search_settings_client_side_search_model2 = SearchSettingsClientSideSearch(**search_settings_client_side_search_model_dict) + + # Verify the model instances are equivalent + assert search_settings_client_side_search_model == search_settings_client_side_search_model2 + + # Convert model instance back to dict and verify no loss of data + search_settings_client_side_search_model_json2 = search_settings_client_side_search_model.to_dict() + assert search_settings_client_side_search_model_json2 == search_settings_client_side_search_model_json + + +class TestModel_SearchSettingsConversationalSearch: + """ + Test Class for 
SearchSettingsConversationalSearch + """ + + def test_search_settings_conversational_search_serialization(self): + """ + Test serialization/deserialization for SearchSettingsConversationalSearch + """ + + # Construct dict forms of any model objects needed in order to build this model. + + search_settings_conversational_search_response_length_model = {} # SearchSettingsConversationalSearchResponseLength + search_settings_conversational_search_response_length_model['option'] = 'moderate' + + search_settings_conversational_search_search_confidence_model = {} # SearchSettingsConversationalSearchSearchConfidence + search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often' + + # Construct a json representation of a SearchSettingsConversationalSearch model + search_settings_conversational_search_model_json = {} + search_settings_conversational_search_model_json['enabled'] = True + search_settings_conversational_search_model_json['response_length'] = search_settings_conversational_search_response_length_model + search_settings_conversational_search_model_json['search_confidence'] = search_settings_conversational_search_search_confidence_model + + # Construct a model instance of SearchSettingsConversationalSearch by calling from_dict on the json representation + search_settings_conversational_search_model = SearchSettingsConversationalSearch.from_dict(search_settings_conversational_search_model_json) + assert search_settings_conversational_search_model != False + + # Construct a model instance of SearchSettingsConversationalSearch by calling from_dict on the json representation + search_settings_conversational_search_model_dict = SearchSettingsConversationalSearch.from_dict(search_settings_conversational_search_model_json).__dict__ + search_settings_conversational_search_model2 = SearchSettingsConversationalSearch(**search_settings_conversational_search_model_dict) + + # Verify the model instances are equivalent + assert search_settings_conversational_search_model == search_settings_conversational_search_model2 + + # Convert model instance back to dict and verify no loss of data + search_settings_conversational_search_model_json2 = search_settings_conversational_search_model.to_dict() + assert search_settings_conversational_search_model_json2 == search_settings_conversational_search_model_json + + +class TestModel_SearchSettingsConversationalSearchResponseLength: + """ + Test Class for SearchSettingsConversationalSearchResponseLength + """ + + def test_search_settings_conversational_search_response_length_serialization(self): + """ + Test serialization/deserialization for SearchSettingsConversationalSearchResponseLength + """ + + # Construct a json representation of a SearchSettingsConversationalSearchResponseLength model + search_settings_conversational_search_response_length_model_json = {} + search_settings_conversational_search_response_length_model_json['option'] = 'moderate' + + # Construct a model instance of SearchSettingsConversationalSearchResponseLength by calling from_dict on the json representation + search_settings_conversational_search_response_length_model = SearchSettingsConversationalSearchResponseLength.from_dict(search_settings_conversational_search_response_length_model_json) + assert search_settings_conversational_search_response_length_model != False + + # Construct a model instance of SearchSettingsConversationalSearchResponseLength by calling from_dict on the json representation + 
search_settings_conversational_search_response_length_model_dict = SearchSettingsConversationalSearchResponseLength.from_dict(search_settings_conversational_search_response_length_model_json).__dict__ + search_settings_conversational_search_response_length_model2 = SearchSettingsConversationalSearchResponseLength(**search_settings_conversational_search_response_length_model_dict) + + # Verify the model instances are equivalent + assert search_settings_conversational_search_response_length_model == search_settings_conversational_search_response_length_model2 + + # Convert model instance back to dict and verify no loss of data + search_settings_conversational_search_response_length_model_json2 = search_settings_conversational_search_response_length_model.to_dict() + assert search_settings_conversational_search_response_length_model_json2 == search_settings_conversational_search_response_length_model_json + + +class TestModel_SearchSettingsConversationalSearchSearchConfidence: + """ + Test Class for SearchSettingsConversationalSearchSearchConfidence + """ + + def test_search_settings_conversational_search_search_confidence_serialization(self): + """ + Test serialization/deserialization for SearchSettingsConversationalSearchSearchConfidence + """ + + # Construct a json representation of a SearchSettingsConversationalSearchSearchConfidence model + search_settings_conversational_search_search_confidence_model_json = {} + search_settings_conversational_search_search_confidence_model_json['threshold'] = 'less_often' + + # Construct a model instance of SearchSettingsConversationalSearchSearchConfidence by calling from_dict on the json representation + search_settings_conversational_search_search_confidence_model = SearchSettingsConversationalSearchSearchConfidence.from_dict(search_settings_conversational_search_search_confidence_model_json) + assert search_settings_conversational_search_search_confidence_model != False + + # Construct a model instance of SearchSettingsConversationalSearchSearchConfidence by calling from_dict on the json representation + search_settings_conversational_search_search_confidence_model_dict = SearchSettingsConversationalSearchSearchConfidence.from_dict(search_settings_conversational_search_search_confidence_model_json).__dict__ + search_settings_conversational_search_search_confidence_model2 = SearchSettingsConversationalSearchSearchConfidence(**search_settings_conversational_search_search_confidence_model_dict) + + # Verify the model instances are equivalent + assert search_settings_conversational_search_search_confidence_model == search_settings_conversational_search_search_confidence_model2 + + # Convert model instance back to dict and verify no loss of data + search_settings_conversational_search_search_confidence_model_json2 = search_settings_conversational_search_search_confidence_model.to_dict() + assert search_settings_conversational_search_search_confidence_model_json2 == search_settings_conversational_search_search_confidence_model_json + + +class TestModel_SearchSettingsDiscovery: + """ + Test Class for SearchSettingsDiscovery + """ + + def test_search_settings_discovery_serialization(self): + """ + Test serialization/deserialization for SearchSettingsDiscovery + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + search_settings_discovery_authentication_model = {} # SearchSettingsDiscoveryAuthentication + search_settings_discovery_authentication_model['basic'] = 'testString' + search_settings_discovery_authentication_model['bearer'] = 'testString' + + # Construct a json representation of a SearchSettingsDiscovery model + search_settings_discovery_model_json = {} + search_settings_discovery_model_json['instance_id'] = 'testString' + search_settings_discovery_model_json['project_id'] = 'testString' + search_settings_discovery_model_json['url'] = 'testString' + search_settings_discovery_model_json['max_primary_results'] = 10000 + search_settings_discovery_model_json['max_total_results'] = 10000 + search_settings_discovery_model_json['confidence_threshold'] = 0.0 + search_settings_discovery_model_json['highlight'] = True + search_settings_discovery_model_json['find_answers'] = True + search_settings_discovery_model_json['authentication'] = search_settings_discovery_authentication_model + + # Construct a model instance of SearchSettingsDiscovery by calling from_dict on the json representation + search_settings_discovery_model = SearchSettingsDiscovery.from_dict(search_settings_discovery_model_json) + assert search_settings_discovery_model != False + + # Construct a model instance of SearchSettingsDiscovery by calling from_dict on the json representation + search_settings_discovery_model_dict = SearchSettingsDiscovery.from_dict(search_settings_discovery_model_json).__dict__ + search_settings_discovery_model2 = SearchSettingsDiscovery(**search_settings_discovery_model_dict) + + # Verify the model instances are equivalent + assert search_settings_discovery_model == search_settings_discovery_model2 + + # Convert model instance back to dict and verify no loss of data + search_settings_discovery_model_json2 = search_settings_discovery_model.to_dict() + assert search_settings_discovery_model_json2 == search_settings_discovery_model_json + + +class TestModel_SearchSettingsDiscoveryAuthentication: + """ + Test Class for SearchSettingsDiscoveryAuthentication + """ + + def test_search_settings_discovery_authentication_serialization(self): + """ + Test serialization/deserialization for SearchSettingsDiscoveryAuthentication + """ + + # Construct a json representation of a SearchSettingsDiscoveryAuthentication model + search_settings_discovery_authentication_model_json = {} + search_settings_discovery_authentication_model_json['basic'] = 'testString' + search_settings_discovery_authentication_model_json['bearer'] = 'testString' + + # Construct a model instance of SearchSettingsDiscoveryAuthentication by calling from_dict on the json representation + search_settings_discovery_authentication_model = SearchSettingsDiscoveryAuthentication.from_dict(search_settings_discovery_authentication_model_json) + assert search_settings_discovery_authentication_model != False + + # Construct a model instance of SearchSettingsDiscoveryAuthentication by calling from_dict on the json representation + search_settings_discovery_authentication_model_dict = SearchSettingsDiscoveryAuthentication.from_dict(search_settings_discovery_authentication_model_json).__dict__ + search_settings_discovery_authentication_model2 = SearchSettingsDiscoveryAuthentication(**search_settings_discovery_authentication_model_dict) + + # Verify the model instances are equivalent + assert search_settings_discovery_authentication_model == search_settings_discovery_authentication_model2 + + # Convert model instance back to dict and verify no loss of data + 
search_settings_discovery_authentication_model_json2 = search_settings_discovery_authentication_model.to_dict() + assert search_settings_discovery_authentication_model_json2 == search_settings_discovery_authentication_model_json + + +class TestModel_SearchSettingsElasticSearch: + """ + Test Class for SearchSettingsElasticSearch + """ + + def test_search_settings_elastic_search_serialization(self): + """ + Test serialization/deserialization for SearchSettingsElasticSearch + """ + + # Construct a json representation of a SearchSettingsElasticSearch model + search_settings_elastic_search_model_json = {} + search_settings_elastic_search_model_json['url'] = 'testString' + search_settings_elastic_search_model_json['port'] = 'testString' + search_settings_elastic_search_model_json['username'] = 'testString' + search_settings_elastic_search_model_json['password'] = 'testString' + search_settings_elastic_search_model_json['index'] = 'testString' + search_settings_elastic_search_model_json['filter'] = ['testString'] + search_settings_elastic_search_model_json['query_body'] = {'anyKey': 'anyValue'} + search_settings_elastic_search_model_json['managed_index'] = 'testString' + search_settings_elastic_search_model_json['apikey'] = 'testString' + + # Construct a model instance of SearchSettingsElasticSearch by calling from_dict on the json representation + search_settings_elastic_search_model = SearchSettingsElasticSearch.from_dict(search_settings_elastic_search_model_json) + assert search_settings_elastic_search_model != False + + # Construct a model instance of SearchSettingsElasticSearch by calling from_dict on the json representation + search_settings_elastic_search_model_dict = SearchSettingsElasticSearch.from_dict(search_settings_elastic_search_model_json).__dict__ + search_settings_elastic_search_model2 = SearchSettingsElasticSearch(**search_settings_elastic_search_model_dict) + + # Verify the model instances are equivalent + assert search_settings_elastic_search_model == search_settings_elastic_search_model2 + + # Convert model instance back to dict and verify no loss of data + search_settings_elastic_search_model_json2 = search_settings_elastic_search_model.to_dict() + assert search_settings_elastic_search_model_json2 == search_settings_elastic_search_model_json + + +class TestModel_SearchSettingsMessages: + """ + Test Class for SearchSettingsMessages + """ + + def test_search_settings_messages_serialization(self): + """ + Test serialization/deserialization for SearchSettingsMessages + """ + + # Construct a json representation of a SearchSettingsMessages model + search_settings_messages_model_json = {} + search_settings_messages_model_json['success'] = 'testString' + search_settings_messages_model_json['error'] = 'testString' + search_settings_messages_model_json['no_result'] = 'testString' + + # Construct a model instance of SearchSettingsMessages by calling from_dict on the json representation + search_settings_messages_model = SearchSettingsMessages.from_dict(search_settings_messages_model_json) + assert search_settings_messages_model != False + + # Construct a model instance of SearchSettingsMessages by calling from_dict on the json representation + search_settings_messages_model_dict = SearchSettingsMessages.from_dict(search_settings_messages_model_json).__dict__ + search_settings_messages_model2 = SearchSettingsMessages(**search_settings_messages_model_dict) + + # Verify the model instances are equivalent + assert search_settings_messages_model == search_settings_messages_model2 + + # 
Convert model instance back to dict and verify no loss of data + search_settings_messages_model_json2 = search_settings_messages_model.to_dict() + assert search_settings_messages_model_json2 == search_settings_messages_model_json + + +class TestModel_SearchSettingsSchemaMapping: + """ + Test Class for SearchSettingsSchemaMapping + """ + + def test_search_settings_schema_mapping_serialization(self): + """ + Test serialization/deserialization for SearchSettingsSchemaMapping + """ + + # Construct a json representation of a SearchSettingsSchemaMapping model + search_settings_schema_mapping_model_json = {} + search_settings_schema_mapping_model_json['url'] = 'testString' + search_settings_schema_mapping_model_json['body'] = 'testString' + search_settings_schema_mapping_model_json['title'] = 'testString' + + # Construct a model instance of SearchSettingsSchemaMapping by calling from_dict on the json representation + search_settings_schema_mapping_model = SearchSettingsSchemaMapping.from_dict(search_settings_schema_mapping_model_json) + assert search_settings_schema_mapping_model != False + + # Construct a model instance of SearchSettingsSchemaMapping by calling from_dict on the json representation + search_settings_schema_mapping_model_dict = SearchSettingsSchemaMapping.from_dict(search_settings_schema_mapping_model_json).__dict__ + search_settings_schema_mapping_model2 = SearchSettingsSchemaMapping(**search_settings_schema_mapping_model_dict) + + # Verify the model instances are equivalent + assert search_settings_schema_mapping_model == search_settings_schema_mapping_model2 + + # Convert model instance back to dict and verify no loss of data + search_settings_schema_mapping_model_json2 = search_settings_schema_mapping_model.to_dict() + assert search_settings_schema_mapping_model_json2 == search_settings_schema_mapping_model_json + + +class TestModel_SearchSettingsServerSideSearch: + """ + Test Class for SearchSettingsServerSideSearch + """ + + def test_search_settings_server_side_search_serialization(self): + """ + Test serialization/deserialization for SearchSettingsServerSideSearch + """ + + # Construct a json representation of a SearchSettingsServerSideSearch model + search_settings_server_side_search_model_json = {} + search_settings_server_side_search_model_json['url'] = 'testString' + search_settings_server_side_search_model_json['port'] = 'testString' + search_settings_server_side_search_model_json['username'] = 'testString' + search_settings_server_side_search_model_json['password'] = 'testString' + search_settings_server_side_search_model_json['filter'] = 'testString' + search_settings_server_side_search_model_json['metadata'] = {'anyKey': 'anyValue'} + search_settings_server_side_search_model_json['apikey'] = 'testString' + search_settings_server_side_search_model_json['no_auth'] = True + search_settings_server_side_search_model_json['auth_type'] = 'basic' + + # Construct a model instance of SearchSettingsServerSideSearch by calling from_dict on the json representation + search_settings_server_side_search_model = SearchSettingsServerSideSearch.from_dict(search_settings_server_side_search_model_json) + assert search_settings_server_side_search_model != False + + # Construct a model instance of SearchSettingsServerSideSearch by calling from_dict on the json representation + search_settings_server_side_search_model_dict = SearchSettingsServerSideSearch.from_dict(search_settings_server_side_search_model_json).__dict__ + search_settings_server_side_search_model2 = 
SearchSettingsServerSideSearch(**search_settings_server_side_search_model_dict) + + # Verify the model instances are equivalent + assert search_settings_server_side_search_model == search_settings_server_side_search_model2 + + # Convert model instance back to dict and verify no loss of data + search_settings_server_side_search_model_json2 = search_settings_server_side_search_model.to_dict() + assert search_settings_server_side_search_model_json2 == search_settings_server_side_search_model_json + + +class TestModel_SearchSkillWarning: + """ + Test Class for SearchSkillWarning + """ + + def test_search_skill_warning_serialization(self): + """ + Test serialization/deserialization for SearchSkillWarning + """ + + # Construct a json representation of a SearchSkillWarning model + search_skill_warning_model_json = {} + search_skill_warning_model_json['code'] = 'testString' + search_skill_warning_model_json['path'] = 'testString' + search_skill_warning_model_json['message'] = 'testString' + + # Construct a model instance of SearchSkillWarning by calling from_dict on the json representation + search_skill_warning_model = SearchSkillWarning.from_dict(search_skill_warning_model_json) + assert search_skill_warning_model != False + + # Construct a model instance of SearchSkillWarning by calling from_dict on the json representation + search_skill_warning_model_dict = SearchSkillWarning.from_dict(search_skill_warning_model_json).__dict__ + search_skill_warning_model2 = SearchSkillWarning(**search_skill_warning_model_dict) + + # Verify the model instances are equivalent + assert search_skill_warning_model == search_skill_warning_model2 + + # Convert model instance back to dict and verify no loss of data + search_skill_warning_model_json2 = search_skill_warning_model.to_dict() + assert search_skill_warning_model_json2 == search_skill_warning_model_json + + +class TestModel_SessionResponse: + """ + Test Class for SessionResponse + """ + + def test_session_response_serialization(self): + """ + Test serialization/deserialization for SessionResponse + """ + + # Construct a json representation of a SessionResponse model + session_response_model_json = {} + session_response_model_json['session_id'] = 'testString' + + # Construct a model instance of SessionResponse by calling from_dict on the json representation + session_response_model = SessionResponse.from_dict(session_response_model_json) + assert session_response_model != False + + # Construct a model instance of SessionResponse by calling from_dict on the json representation + session_response_model_dict = SessionResponse.from_dict(session_response_model_json).__dict__ + session_response_model2 = SessionResponse(**session_response_model_dict) + + # Verify the model instances are equivalent + assert session_response_model == session_response_model2 + + # Convert model instance back to dict and verify no loss of data + session_response_model_json2 = session_response_model.to_dict() + assert session_response_model_json2 == session_response_model_json + + +class TestModel_Skill: + """ + Test Class for Skill + """ + + def test_skill_serialization(self): + """ + Test serialization/deserialization for Skill + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + search_settings_discovery_authentication_model = {} # SearchSettingsDiscoveryAuthentication + search_settings_discovery_authentication_model['basic'] = 'testString' + search_settings_discovery_authentication_model['bearer'] = 'testString' + + search_settings_discovery_model = {} # SearchSettingsDiscovery + search_settings_discovery_model['instance_id'] = 'testString' + search_settings_discovery_model['project_id'] = 'testString' + search_settings_discovery_model['url'] = 'testString' + search_settings_discovery_model['max_primary_results'] = 10000 + search_settings_discovery_model['max_total_results'] = 10000 + search_settings_discovery_model['confidence_threshold'] = 0.0 + search_settings_discovery_model['highlight'] = True + search_settings_discovery_model['find_answers'] = True + search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model + + search_settings_messages_model = {} # SearchSettingsMessages + search_settings_messages_model['success'] = 'testString' + search_settings_messages_model['error'] = 'testString' + search_settings_messages_model['no_result'] = 'testString' + + search_settings_schema_mapping_model = {} # SearchSettingsSchemaMapping + search_settings_schema_mapping_model['url'] = 'testString' + search_settings_schema_mapping_model['body'] = 'testString' + search_settings_schema_mapping_model['title'] = 'testString' + + search_settings_elastic_search_model = {} # SearchSettingsElasticSearch + search_settings_elastic_search_model['url'] = 'testString' + search_settings_elastic_search_model['port'] = 'testString' + search_settings_elastic_search_model['username'] = 'testString' + search_settings_elastic_search_model['password'] = 'testString' + search_settings_elastic_search_model['index'] = 'testString' + search_settings_elastic_search_model['filter'] = ['testString'] + search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'} + search_settings_elastic_search_model['managed_index'] = 'testString' + search_settings_elastic_search_model['apikey'] = 'testString' + + search_settings_conversational_search_response_length_model = {} # SearchSettingsConversationalSearchResponseLength + search_settings_conversational_search_response_length_model['option'] = 'moderate' + + search_settings_conversational_search_search_confidence_model = {} # SearchSettingsConversationalSearchSearchConfidence + search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often' + + search_settings_conversational_search_model = {} # SearchSettingsConversationalSearch + search_settings_conversational_search_model['enabled'] = True + search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model + search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model + + search_settings_server_side_search_model = {} # SearchSettingsServerSideSearch + search_settings_server_side_search_model['url'] = 'testString' + search_settings_server_side_search_model['port'] = 'testString' + search_settings_server_side_search_model['username'] = 'testString' + search_settings_server_side_search_model['password'] = 'testString' + search_settings_server_side_search_model['filter'] = 'testString' + search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'} + search_settings_server_side_search_model['apikey'] = 'testString' + search_settings_server_side_search_model['no_auth'] = True + 
search_settings_server_side_search_model['auth_type'] = 'basic' + + search_settings_client_side_search_model = {} # SearchSettingsClientSideSearch + search_settings_client_side_search_model['filter'] = 'testString' + search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'} + + search_settings_model = {} # SearchSettings + search_settings_model['discovery'] = search_settings_discovery_model + search_settings_model['messages'] = search_settings_messages_model + search_settings_model['schema_mapping'] = search_settings_schema_mapping_model + search_settings_model['elastic_search'] = search_settings_elastic_search_model + search_settings_model['conversational_search'] = search_settings_conversational_search_model + search_settings_model['server_side_search'] = search_settings_server_side_search_model + search_settings_model['client_side_search'] = search_settings_client_side_search_model + + # Construct a json representation of a Skill model + skill_model_json = {} + skill_model_json['name'] = 'testString' + skill_model_json['description'] = 'testString' + skill_model_json['workspace'] = {'anyKey': 'anyValue'} + skill_model_json['dialog_settings'] = {'anyKey': 'anyValue'} + skill_model_json['search_settings'] = search_settings_model + skill_model_json['language'] = 'testString' + skill_model_json['type'] = 'action' + + # Construct a model instance of Skill by calling from_dict on the json representation + skill_model = Skill.from_dict(skill_model_json) + assert skill_model != False + + # Construct a model instance of Skill by calling from_dict on the json representation + skill_model_dict = Skill.from_dict(skill_model_json).__dict__ + skill_model2 = Skill(**skill_model_dict) + + # Verify the model instances are equivalent + assert skill_model == skill_model2 + + # Convert model instance back to dict and verify no loss of data + skill_model_json2 = skill_model.to_dict() + assert skill_model_json2 == skill_model_json + + +class TestModel_SkillImport: + """ + Test Class for SkillImport + """ + + def test_skill_import_serialization(self): + """ + Test serialization/deserialization for SkillImport + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + search_settings_discovery_authentication_model = {} # SearchSettingsDiscoveryAuthentication + search_settings_discovery_authentication_model['basic'] = 'testString' + search_settings_discovery_authentication_model['bearer'] = 'testString' + + search_settings_discovery_model = {} # SearchSettingsDiscovery + search_settings_discovery_model['instance_id'] = 'testString' + search_settings_discovery_model['project_id'] = 'testString' + search_settings_discovery_model['url'] = 'testString' + search_settings_discovery_model['max_primary_results'] = 10000 + search_settings_discovery_model['max_total_results'] = 10000 + search_settings_discovery_model['confidence_threshold'] = 0.0 + search_settings_discovery_model['highlight'] = True + search_settings_discovery_model['find_answers'] = True + search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model + + search_settings_messages_model = {} # SearchSettingsMessages + search_settings_messages_model['success'] = 'testString' + search_settings_messages_model['error'] = 'testString' + search_settings_messages_model['no_result'] = 'testString' + + search_settings_schema_mapping_model = {} # SearchSettingsSchemaMapping + search_settings_schema_mapping_model['url'] = 'testString' + search_settings_schema_mapping_model['body'] = 'testString' + search_settings_schema_mapping_model['title'] = 'testString' + + search_settings_elastic_search_model = {} # SearchSettingsElasticSearch + search_settings_elastic_search_model['url'] = 'testString' + search_settings_elastic_search_model['port'] = 'testString' + search_settings_elastic_search_model['username'] = 'testString' + search_settings_elastic_search_model['password'] = 'testString' + search_settings_elastic_search_model['index'] = 'testString' + search_settings_elastic_search_model['filter'] = ['testString'] + search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'} + search_settings_elastic_search_model['managed_index'] = 'testString' + search_settings_elastic_search_model['apikey'] = 'testString' + + search_settings_conversational_search_response_length_model = {} # SearchSettingsConversationalSearchResponseLength + search_settings_conversational_search_response_length_model['option'] = 'moderate' + + search_settings_conversational_search_search_confidence_model = {} # SearchSettingsConversationalSearchSearchConfidence + search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often' + + search_settings_conversational_search_model = {} # SearchSettingsConversationalSearch + search_settings_conversational_search_model['enabled'] = True + search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model + search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model + + search_settings_server_side_search_model = {} # SearchSettingsServerSideSearch + search_settings_server_side_search_model['url'] = 'testString' + search_settings_server_side_search_model['port'] = 'testString' + search_settings_server_side_search_model['username'] = 'testString' + search_settings_server_side_search_model['password'] = 'testString' + search_settings_server_side_search_model['filter'] = 'testString' + search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'} + search_settings_server_side_search_model['apikey'] = 'testString' + search_settings_server_side_search_model['no_auth'] = True + 
search_settings_server_side_search_model['auth_type'] = 'basic' + + search_settings_client_side_search_model = {} # SearchSettingsClientSideSearch + search_settings_client_side_search_model['filter'] = 'testString' + search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'} + + search_settings_model = {} # SearchSettings + search_settings_model['discovery'] = search_settings_discovery_model + search_settings_model['messages'] = search_settings_messages_model + search_settings_model['schema_mapping'] = search_settings_schema_mapping_model + search_settings_model['elastic_search'] = search_settings_elastic_search_model + search_settings_model['conversational_search'] = search_settings_conversational_search_model + search_settings_model['server_side_search'] = search_settings_server_side_search_model + search_settings_model['client_side_search'] = search_settings_client_side_search_model + + # Construct a json representation of a SkillImport model + skill_import_model_json = {} + skill_import_model_json['name'] = 'testString' + skill_import_model_json['description'] = 'testString' + skill_import_model_json['workspace'] = {'anyKey': 'anyValue'} + skill_import_model_json['dialog_settings'] = {'anyKey': 'anyValue'} + skill_import_model_json['search_settings'] = search_settings_model + skill_import_model_json['language'] = 'testString' + skill_import_model_json['type'] = 'action' + + # Construct a model instance of SkillImport by calling from_dict on the json representation + skill_import_model = SkillImport.from_dict(skill_import_model_json) + assert skill_import_model != False + + # Construct a model instance of SkillImport by calling from_dict on the json representation + skill_import_model_dict = SkillImport.from_dict(skill_import_model_json).__dict__ + skill_import_model2 = SkillImport(**skill_import_model_dict) + + # Verify the model instances are equivalent + assert skill_import_model == skill_import_model2 + + # Convert model instance back to dict and verify no loss of data + skill_import_model_json2 = skill_import_model.to_dict() + assert skill_import_model_json2 == skill_import_model_json + + +class TestModel_SkillsAsyncRequestStatus: + """ + Test Class for SkillsAsyncRequestStatus + """ + + def test_skills_async_request_status_serialization(self): + """ + Test serialization/deserialization for SkillsAsyncRequestStatus + """ + + # Construct a json representation of a SkillsAsyncRequestStatus model + skills_async_request_status_model_json = {} + + # Construct a model instance of SkillsAsyncRequestStatus by calling from_dict on the json representation + skills_async_request_status_model = SkillsAsyncRequestStatus.from_dict(skills_async_request_status_model_json) + assert skills_async_request_status_model != False + + # Construct a model instance of SkillsAsyncRequestStatus by calling from_dict on the json representation + skills_async_request_status_model_dict = SkillsAsyncRequestStatus.from_dict(skills_async_request_status_model_json).__dict__ + skills_async_request_status_model2 = SkillsAsyncRequestStatus(**skills_async_request_status_model_dict) + + # Verify the model instances are equivalent + assert skills_async_request_status_model == skills_async_request_status_model2 + + # Convert model instance back to dict and verify no loss of data + skills_async_request_status_model_json2 = skills_async_request_status_model.to_dict() + assert skills_async_request_status_model_json2 == skills_async_request_status_model_json + + +class TestModel_SkillsExport: + """ + Test 
Class for SkillsExport + """ + + def test_skills_export_serialization(self): + """ + Test serialization/deserialization for SkillsExport + """ + + # Construct dict forms of any model objects needed in order to build this model. + + search_settings_discovery_authentication_model = {} # SearchSettingsDiscoveryAuthentication + search_settings_discovery_authentication_model['basic'] = 'testString' + search_settings_discovery_authentication_model['bearer'] = 'testString' + + search_settings_discovery_model = {} # SearchSettingsDiscovery + search_settings_discovery_model['instance_id'] = 'testString' + search_settings_discovery_model['project_id'] = 'testString' + search_settings_discovery_model['url'] = 'testString' + search_settings_discovery_model['max_primary_results'] = 10000 + search_settings_discovery_model['max_total_results'] = 10000 + search_settings_discovery_model['confidence_threshold'] = 0.0 + search_settings_discovery_model['highlight'] = True + search_settings_discovery_model['find_answers'] = True + search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model + + search_settings_messages_model = {} # SearchSettingsMessages + search_settings_messages_model['success'] = 'testString' + search_settings_messages_model['error'] = 'testString' + search_settings_messages_model['no_result'] = 'testString' + + search_settings_schema_mapping_model = {} # SearchSettingsSchemaMapping + search_settings_schema_mapping_model['url'] = 'testString' + search_settings_schema_mapping_model['body'] = 'testString' + search_settings_schema_mapping_model['title'] = 'testString' + + search_settings_elastic_search_model = {} # SearchSettingsElasticSearch + search_settings_elastic_search_model['url'] = 'testString' + search_settings_elastic_search_model['port'] = 'testString' + search_settings_elastic_search_model['username'] = 'testString' + search_settings_elastic_search_model['password'] = 'testString' + search_settings_elastic_search_model['index'] = 'testString' + search_settings_elastic_search_model['filter'] = ['testString'] + search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'} + search_settings_elastic_search_model['managed_index'] = 'testString' + search_settings_elastic_search_model['apikey'] = 'testString' + + search_settings_conversational_search_response_length_model = {} # SearchSettingsConversationalSearchResponseLength + search_settings_conversational_search_response_length_model['option'] = 'moderate' + + search_settings_conversational_search_search_confidence_model = {} # SearchSettingsConversationalSearchSearchConfidence + search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often' + + search_settings_conversational_search_model = {} # SearchSettingsConversationalSearch + search_settings_conversational_search_model['enabled'] = True + search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model + search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model + + search_settings_server_side_search_model = {} # SearchSettingsServerSideSearch + search_settings_server_side_search_model['url'] = 'testString' + search_settings_server_side_search_model['port'] = 'testString' + search_settings_server_side_search_model['username'] = 'testString' + search_settings_server_side_search_model['password'] = 'testString' + search_settings_server_side_search_model['filter'] = 
'testString' + search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'} + search_settings_server_side_search_model['apikey'] = 'testString' + search_settings_server_side_search_model['no_auth'] = True + search_settings_server_side_search_model['auth_type'] = 'basic' + + search_settings_client_side_search_model = {} # SearchSettingsClientSideSearch + search_settings_client_side_search_model['filter'] = 'testString' + search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'} + + search_settings_model = {} # SearchSettings + search_settings_model['discovery'] = search_settings_discovery_model + search_settings_model['messages'] = search_settings_messages_model + search_settings_model['schema_mapping'] = search_settings_schema_mapping_model + search_settings_model['elastic_search'] = search_settings_elastic_search_model + search_settings_model['conversational_search'] = search_settings_conversational_search_model + search_settings_model['server_side_search'] = search_settings_server_side_search_model + search_settings_model['client_side_search'] = search_settings_client_side_search_model + + skill_model = {} # Skill + skill_model['name'] = 'testString' + skill_model['description'] = 'testString' + skill_model['workspace'] = {'anyKey': 'anyValue'} + skill_model['dialog_settings'] = {'anyKey': 'anyValue'} + skill_model['search_settings'] = search_settings_model + skill_model['language'] = 'testString' + skill_model['type'] = 'action' + + assistant_state_model = {} # AssistantState + assistant_state_model['action_disabled'] = True + assistant_state_model['dialog_disabled'] = True + + # Construct a json representation of a SkillsExport model + skills_export_model_json = {} + skills_export_model_json['assistant_skills'] = [skill_model] + skills_export_model_json['assistant_state'] = assistant_state_model + + # Construct a model instance of SkillsExport by calling from_dict on the json representation + skills_export_model = SkillsExport.from_dict(skills_export_model_json) + assert skills_export_model != False + + # Construct a model instance of SkillsExport by calling from_dict on the json representation + skills_export_model_dict = SkillsExport.from_dict(skills_export_model_json).__dict__ + skills_export_model2 = SkillsExport(**skills_export_model_dict) + + # Verify the model instances are equivalent + assert skills_export_model == skills_export_model2 + + # Convert model instance back to dict and verify no loss of data + skills_export_model_json2 = skills_export_model.to_dict() + assert skills_export_model_json2 == skills_export_model_json + + +class TestModel_StatefulMessageResponse: + """ + Test Class for StatefulMessageResponse + """ + + def test_stateful_message_response_serialization(self): + """ + Test serialization/deserialization for StatefulMessageResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + 
runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = 
True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + message_output_model = {} # MessageOutput + message_output_model['generic'] = [runtime_response_generic_model] + message_output_model['intents'] = [runtime_intent_model] + message_output_model['entities'] = [runtime_entity_model] + message_output_model['actions'] = [dialog_node_action_model] + message_output_model['debug'] = message_output_debug_model + message_output_model['user_defined'] = {'anyKey': 'anyValue'} + message_output_model['spelling'] = message_output_spelling_model + message_output_model['llm_metadata'] = [message_output_llm_metadata_model] + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + message_context_global_model = {} # MessageContextGlobal + message_context_global_model['system'] = message_context_global_system_model + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + message_context_action_skill_model = {} # MessageContextActionSkill + message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + message_context_skills_model = {} # MessageContextSkills + message_context_skills_model['main skill'] = message_context_dialog_skill_model + message_context_skills_model['actions skill'] = message_context_action_skill_model + + message_context_model = {} # MessageContext + message_context_model['global'] = message_context_global_model + message_context_model['skills'] = message_context_skills_model + message_context_model['integrations'] = {'anyKey': 'anyValue'} + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + 
request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + message_input_model = {} # MessageInput + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + # Construct a json representation of a StatefulMessageResponse model + stateful_message_response_model_json = {} + stateful_message_response_model_json['output'] = message_output_model + stateful_message_response_model_json['context'] = message_context_model + stateful_message_response_model_json['user_id'] = 'testString' + stateful_message_response_model_json['masked_output'] = message_output_model + stateful_message_response_model_json['masked_input'] = message_input_model + + # Construct a model instance of StatefulMessageResponse by calling from_dict on the json representation + stateful_message_response_model = StatefulMessageResponse.from_dict(stateful_message_response_model_json) + assert stateful_message_response_model != False + + # Construct a model instance of StatefulMessageResponse by calling from_dict on the json representation + stateful_message_response_model_dict = StatefulMessageResponse.from_dict(stateful_message_response_model_json).__dict__ + stateful_message_response_model2 = StatefulMessageResponse(**stateful_message_response_model_dict) + + # Verify the model instances are equivalent + assert stateful_message_response_model == stateful_message_response_model2 + + # Convert model instance back to dict and verify no loss of data + stateful_message_response_model_json2 = stateful_message_response_model.to_dict() + assert stateful_message_response_model_json2 == stateful_message_response_model_json + + +class TestModel_StatelessFinalResponse: + """ + Test Class for StatelessFinalResponse + """ + + def test_stateless_final_response_serialization(self): + """ + Test serialization/deserialization for StatelessFinalResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + 
runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = 
True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + stateless_message_context_global_model = {} # StatelessMessageContextGlobal + stateless_message_context_global_model['system'] = message_context_global_system_model + stateless_message_context_global_model['session_id'] = 'testString' + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + stateless_message_context_skills_actions_skill_model = {} # StatelessMessageContextSkillsActionsSkill + stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model + stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'} + + stateless_message_context_skills_model = {} # StatelessMessageContextSkills + stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model + stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model + + stateless_message_context_model = {} # StatelessMessageContext + stateless_message_context_model['global'] = stateless_message_context_global_model + stateless_message_context_model['skills'] = stateless_message_context_skills_model + stateless_message_context_model['integrations'] = {'anyKey': 'anyValue'} + + stateless_final_response_output_model = {} # StatelessFinalResponseOutput + stateless_final_response_output_model['generic'] = [runtime_response_generic_model] + stateless_final_response_output_model['intents'] = [runtime_intent_model] + stateless_final_response_output_model['entities'] = [runtime_entity_model] + stateless_final_response_output_model['actions'] = 
[dialog_node_action_model] + stateless_final_response_output_model['debug'] = message_output_debug_model + stateless_final_response_output_model['user_defined'] = {'anyKey': 'anyValue'} + stateless_final_response_output_model['spelling'] = message_output_spelling_model + stateless_final_response_output_model['llm_metadata'] = [message_output_llm_metadata_model] + stateless_final_response_output_model['streaming_metadata'] = stateless_message_context_model + + # Construct a json representation of a StatelessFinalResponse model + stateless_final_response_model_json = {} + stateless_final_response_model_json['output'] = stateless_final_response_output_model + stateless_final_response_model_json['context'] = stateless_message_context_model + stateless_final_response_model_json['user_id'] = 'testString' + + # Construct a model instance of StatelessFinalResponse by calling from_dict on the json representation + stateless_final_response_model = StatelessFinalResponse.from_dict(stateless_final_response_model_json) + assert stateless_final_response_model != False + + # Construct a model instance of StatelessFinalResponse by calling from_dict on the json representation + stateless_final_response_model_dict = StatelessFinalResponse.from_dict(stateless_final_response_model_json).__dict__ + stateless_final_response_model2 = StatelessFinalResponse(**stateless_final_response_model_dict) + + # Verify the model instances are equivalent + assert stateless_final_response_model == stateless_final_response_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_final_response_model_json2 = stateless_final_response_model.to_dict() + assert stateless_final_response_model_json2 == stateless_final_response_model_json + + +class TestModel_StatelessFinalResponseOutput: + """ + Test Class for StatelessFinalResponseOutput + """ + + def test_stateless_final_response_output_serialization(self): + """ + Test serialization/deserialization for StatelessFinalResponseOutput + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + 
runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = 
True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + stateless_message_context_global_model = {} # StatelessMessageContextGlobal + stateless_message_context_global_model['system'] = message_context_global_system_model + stateless_message_context_global_model['session_id'] = 'testString' + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + stateless_message_context_skills_actions_skill_model = {} # StatelessMessageContextSkillsActionsSkill + stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model + stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'} + + stateless_message_context_skills_model = {} # StatelessMessageContextSkills + stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model + stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model + + stateless_message_context_model = {} # StatelessMessageContext + stateless_message_context_model['global'] = stateless_message_context_global_model + stateless_message_context_model['skills'] = stateless_message_context_skills_model + stateless_message_context_model['integrations'] = {'anyKey': 'anyValue'} + + # Construct a json representation of a StatelessFinalResponseOutput model + stateless_final_response_output_model_json = {} + stateless_final_response_output_model_json['generic'] = [runtime_response_generic_model] + stateless_final_response_output_model_json['intents'] = [runtime_intent_model] + stateless_final_response_output_model_json['entities'] = [runtime_entity_model] + 
stateless_final_response_output_model_json['actions'] = [dialog_node_action_model] + stateless_final_response_output_model_json['debug'] = message_output_debug_model + stateless_final_response_output_model_json['user_defined'] = {'anyKey': 'anyValue'} + stateless_final_response_output_model_json['spelling'] = message_output_spelling_model + stateless_final_response_output_model_json['llm_metadata'] = [message_output_llm_metadata_model] + stateless_final_response_output_model_json['streaming_metadata'] = stateless_message_context_model + + # Construct a model instance of StatelessFinalResponseOutput by calling from_dict on the json representation + stateless_final_response_output_model = StatelessFinalResponseOutput.from_dict(stateless_final_response_output_model_json) + assert stateless_final_response_output_model != False + + # Construct a model instance of StatelessFinalResponseOutput by calling from_dict on the json representation + stateless_final_response_output_model_dict = StatelessFinalResponseOutput.from_dict(stateless_final_response_output_model_json).__dict__ + stateless_final_response_output_model2 = StatelessFinalResponseOutput(**stateless_final_response_output_model_dict) + + # Verify the model instances are equivalent + assert stateless_final_response_output_model == stateless_final_response_output_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_final_response_output_model_json2 = stateless_final_response_output_model.to_dict() + assert stateless_final_response_output_model_json2 == stateless_final_response_output_model_json + + +class TestModel_StatelessMessageContext: + """ + Test Class for StatelessMessageContext + """ + + def test_stateless_message_context_serialization(self): + """ + Test serialization/deserialization for StatelessMessageContext + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + stateless_message_context_global_model = {} # StatelessMessageContextGlobal + stateless_message_context_global_model['system'] = message_context_global_system_model + stateless_message_context_global_model['session_id'] = 'testString' + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + stateless_message_context_skills_actions_skill_model = {} # StatelessMessageContextSkillsActionsSkill + stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model + stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'} + + stateless_message_context_skills_model = {} # StatelessMessageContextSkills + stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model + stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model + + # Construct a json representation of a StatelessMessageContext model + stateless_message_context_model_json = {} + stateless_message_context_model_json['global'] = stateless_message_context_global_model + stateless_message_context_model_json['skills'] = stateless_message_context_skills_model + stateless_message_context_model_json['integrations'] = {'anyKey': 'anyValue'} + + # Construct a model instance of StatelessMessageContext by calling from_dict on the json representation + stateless_message_context_model = StatelessMessageContext.from_dict(stateless_message_context_model_json) + assert stateless_message_context_model != False + + # Construct a model instance of StatelessMessageContext by calling from_dict on the json representation + stateless_message_context_model_dict = StatelessMessageContext.from_dict(stateless_message_context_model_json).__dict__ + stateless_message_context_model2 = StatelessMessageContext(**stateless_message_context_model_dict) + + # Verify the model instances are equivalent + assert stateless_message_context_model == stateless_message_context_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_message_context_model_json2 = stateless_message_context_model.to_dict() + assert 
stateless_message_context_model_json2 == stateless_message_context_model_json + + +class TestModel_StatelessMessageContextGlobal: + """ + Test Class for StatelessMessageContextGlobal + """ + + def test_stateless_message_context_global_serialization(self): + """ + Test serialization/deserialization for StatelessMessageContextGlobal + """ + + # Construct dict forms of any model objects needed in order to build this model. + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + # Construct a json representation of a StatelessMessageContextGlobal model + stateless_message_context_global_model_json = {} + stateless_message_context_global_model_json['system'] = message_context_global_system_model + stateless_message_context_global_model_json['session_id'] = 'testString' + + # Construct a model instance of StatelessMessageContextGlobal by calling from_dict on the json representation + stateless_message_context_global_model = StatelessMessageContextGlobal.from_dict(stateless_message_context_global_model_json) + assert stateless_message_context_global_model != False + + # Construct a model instance of StatelessMessageContextGlobal by calling from_dict on the json representation + stateless_message_context_global_model_dict = StatelessMessageContextGlobal.from_dict(stateless_message_context_global_model_json).__dict__ + stateless_message_context_global_model2 = StatelessMessageContextGlobal(**stateless_message_context_global_model_dict) + + # Verify the model instances are equivalent + assert stateless_message_context_global_model == stateless_message_context_global_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_message_context_global_model_json2 = stateless_message_context_global_model.to_dict() + assert stateless_message_context_global_model_json2 == stateless_message_context_global_model_json + + +class TestModel_StatelessMessageContextSkills: + """ + Test Class for StatelessMessageContextSkills + """ + + def test_stateless_message_context_skills_serialization(self): + """ + Test serialization/deserialization for StatelessMessageContextSkills + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + stateless_message_context_skills_actions_skill_model = {} # StatelessMessageContextSkillsActionsSkill + stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model + stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'} + + # Construct a json representation of a StatelessMessageContextSkills model + stateless_message_context_skills_model_json = {} + stateless_message_context_skills_model_json['main skill'] = message_context_dialog_skill_model + stateless_message_context_skills_model_json['actions skill'] = stateless_message_context_skills_actions_skill_model + + # Construct a model instance of StatelessMessageContextSkills by calling from_dict on the json representation + stateless_message_context_skills_model = StatelessMessageContextSkills.from_dict(stateless_message_context_skills_model_json) + assert stateless_message_context_skills_model != False + + # Construct a model instance of StatelessMessageContextSkills by calling from_dict on the json representation + stateless_message_context_skills_model_dict = StatelessMessageContextSkills.from_dict(stateless_message_context_skills_model_json).__dict__ + stateless_message_context_skills_model2 = StatelessMessageContextSkills(**stateless_message_context_skills_model_dict) + + # Verify the model instances are equivalent + assert stateless_message_context_skills_model == stateless_message_context_skills_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_message_context_skills_model_json2 = stateless_message_context_skills_model.to_dict() + assert stateless_message_context_skills_model_json2 == stateless_message_context_skills_model_json + + +class TestModel_StatelessMessageContextSkillsActionsSkill: + """ + Test Class for StatelessMessageContextSkillsActionsSkill + """ + + def test_stateless_message_context_skills_actions_skill_serialization(self): + """ + Test serialization/deserialization for StatelessMessageContextSkillsActionsSkill + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + # Construct a json representation of a StatelessMessageContextSkillsActionsSkill model + stateless_message_context_skills_actions_skill_model_json = {} + stateless_message_context_skills_actions_skill_model_json['user_defined'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model_json['system'] = message_context_skill_system_model + stateless_message_context_skills_actions_skill_model_json['action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model_json['skill_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model_json['private_action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model_json['private_skill_variables'] = {'anyKey': 'anyValue'} + + # Construct a model instance of StatelessMessageContextSkillsActionsSkill by calling from_dict on the json representation + stateless_message_context_skills_actions_skill_model = StatelessMessageContextSkillsActionsSkill.from_dict(stateless_message_context_skills_actions_skill_model_json) + assert stateless_message_context_skills_actions_skill_model != False + + # Construct a model instance of StatelessMessageContextSkillsActionsSkill by calling from_dict on the json representation + stateless_message_context_skills_actions_skill_model_dict = StatelessMessageContextSkillsActionsSkill.from_dict(stateless_message_context_skills_actions_skill_model_json).__dict__ + stateless_message_context_skills_actions_skill_model2 = StatelessMessageContextSkillsActionsSkill(**stateless_message_context_skills_actions_skill_model_dict) + + # Verify the model instances are equivalent + assert stateless_message_context_skills_actions_skill_model == stateless_message_context_skills_actions_skill_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_message_context_skills_actions_skill_model_json2 = stateless_message_context_skills_actions_skill_model.to_dict() + assert stateless_message_context_skills_actions_skill_model_json2 == stateless_message_context_skills_actions_skill_model_json + + +class TestModel_StatelessMessageInput: + """ + Test Class for StatelessMessageInput + """ + + def test_stateless_message_input_serialization(self): + """ + Test serialization/deserialization for StatelessMessageInput + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + stateless_message_input_options_model = {} # 
StatelessMessageInputOptions + stateless_message_input_options_model['restart'] = False + stateless_message_input_options_model['alternate_intents'] = False + stateless_message_input_options_model['async_callout'] = False + stateless_message_input_options_model['spelling'] = message_input_options_spelling_model + stateless_message_input_options_model['debug'] = False + + # Construct a json representation of a StatelessMessageInput model + stateless_message_input_model_json = {} + stateless_message_input_model_json['message_type'] = 'text' + stateless_message_input_model_json['text'] = 'testString' + stateless_message_input_model_json['intents'] = [runtime_intent_model] + stateless_message_input_model_json['entities'] = [runtime_entity_model] + stateless_message_input_model_json['suggestion_id'] = 'testString' + stateless_message_input_model_json['attachments'] = [message_input_attachment_model] + stateless_message_input_model_json['analytics'] = request_analytics_model + stateless_message_input_model_json['options'] = stateless_message_input_options_model + + # Construct a model instance of StatelessMessageInput by calling from_dict on the json representation + stateless_message_input_model = StatelessMessageInput.from_dict(stateless_message_input_model_json) + assert stateless_message_input_model != False + + # Construct a model instance of StatelessMessageInput by calling from_dict on the json representation + stateless_message_input_model_dict = StatelessMessageInput.from_dict(stateless_message_input_model_json).__dict__ + stateless_message_input_model2 = StatelessMessageInput(**stateless_message_input_model_dict) + + # Verify the model instances are equivalent + assert stateless_message_input_model == stateless_message_input_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_message_input_model_json2 = stateless_message_input_model.to_dict() + assert stateless_message_input_model_json2 == stateless_message_input_model_json + + +class TestModel_StatelessMessageInputOptions: + """ + Test Class for StatelessMessageInputOptions + """ + + def test_stateless_message_input_options_serialization(self): + """ + Test serialization/deserialization for StatelessMessageInputOptions + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + # Construct a json representation of a StatelessMessageInputOptions model + stateless_message_input_options_model_json = {} + stateless_message_input_options_model_json['restart'] = False + stateless_message_input_options_model_json['alternate_intents'] = False + stateless_message_input_options_model_json['async_callout'] = False + stateless_message_input_options_model_json['spelling'] = message_input_options_spelling_model + stateless_message_input_options_model_json['debug'] = False + + # Construct a model instance of StatelessMessageInputOptions by calling from_dict on the json representation + stateless_message_input_options_model = StatelessMessageInputOptions.from_dict(stateless_message_input_options_model_json) + assert stateless_message_input_options_model != False + + # Construct a model instance of StatelessMessageInputOptions by calling from_dict on the json representation + stateless_message_input_options_model_dict = StatelessMessageInputOptions.from_dict(stateless_message_input_options_model_json).__dict__ + stateless_message_input_options_model2 = StatelessMessageInputOptions(**stateless_message_input_options_model_dict) + + # Verify the model instances are equivalent + assert stateless_message_input_options_model == stateless_message_input_options_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_message_input_options_model_json2 = stateless_message_input_options_model.to_dict() + assert stateless_message_input_options_model_json2 == stateless_message_input_options_model_json + + +class TestModel_StatelessMessageResponse: + """ + Test Class for StatelessMessageResponse + """ + + def test_stateless_message_response_serialization(self): + """ + Test serialization/deserialization for StatelessMessageResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversational_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + 
runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = 
True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + message_output_model = {} # MessageOutput + message_output_model['generic'] = [runtime_response_generic_model] + message_output_model['intents'] = [runtime_intent_model] + message_output_model['entities'] = [runtime_entity_model] + message_output_model['actions'] = [dialog_node_action_model] + message_output_model['debug'] = message_output_debug_model + message_output_model['user_defined'] = {'anyKey': 'anyValue'} + message_output_model['spelling'] = message_output_spelling_model + message_output_model['llm_metadata'] = [message_output_llm_metadata_model] + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + stateless_message_context_global_model = {} # StatelessMessageContextGlobal + stateless_message_context_global_model['system'] = message_context_global_system_model + stateless_message_context_global_model['session_id'] = 'testString' + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + stateless_message_context_skills_actions_skill_model = {} # StatelessMessageContextSkillsActionsSkill + stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model + stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'} + + stateless_message_context_skills_model = {} # StatelessMessageContextSkills + stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model + stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model + + stateless_message_context_model = {} # StatelessMessageContext + stateless_message_context_model['global'] = 
stateless_message_context_global_model + stateless_message_context_model['skills'] = stateless_message_context_skills_model + stateless_message_context_model['integrations'] = {'anyKey': 'anyValue'} + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + message_input_model = {} # MessageInput + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + # Construct a json representation of a StatelessMessageResponse model + stateless_message_response_model_json = {} + stateless_message_response_model_json['output'] = message_output_model + stateless_message_response_model_json['context'] = stateless_message_context_model + stateless_message_response_model_json['masked_output'] = message_output_model + stateless_message_response_model_json['masked_input'] = message_input_model + stateless_message_response_model_json['user_id'] = 'testString' + + # Construct a model instance of StatelessMessageResponse by calling from_dict on the json representation + stateless_message_response_model = StatelessMessageResponse.from_dict(stateless_message_response_model_json) + assert stateless_message_response_model != False + + # Construct a model instance of StatelessMessageResponse by calling from_dict on the json representation + stateless_message_response_model_dict = StatelessMessageResponse.from_dict(stateless_message_response_model_json).__dict__ + stateless_message_response_model2 = StatelessMessageResponse(**stateless_message_response_model_dict) + + # Verify the model instances are equivalent + assert stateless_message_response_model == stateless_message_response_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_message_response_model_json2 = stateless_message_response_model.to_dict() + assert stateless_message_response_model_json2 == stateless_message_response_model_json + + +class TestModel_StatusError: + """ + Test Class for StatusError + """ + + def test_status_error_serialization(self): + """ + Test serialization/deserialization for StatusError + """ + + # Construct a json representation of a StatusError model + status_error_model_json = {} + status_error_model_json['message'] = 
'testString' + + # Construct a model instance of StatusError by calling from_dict on the json representation + status_error_model = StatusError.from_dict(status_error_model_json) + assert status_error_model != False + + # Construct a model instance of StatusError by calling from_dict on the json representation + status_error_model_dict = StatusError.from_dict(status_error_model_json).__dict__ + status_error_model2 = StatusError(**status_error_model_dict) + + # Verify the model instances are equivalent + assert status_error_model == status_error_model2 + + # Convert model instance back to dict and verify no loss of data + status_error_model_json2 = status_error_model.to_dict() + assert status_error_model_json2 == status_error_model_json + + +class TestModel_TurnEventActionSource: + """ + Test Class for TurnEventActionSource + """ + + def test_turn_event_action_source_serialization(self): + """ + Test serialization/deserialization for TurnEventActionSource + """ + + # Construct a json representation of a TurnEventActionSource model + turn_event_action_source_model_json = {} + turn_event_action_source_model_json['type'] = 'action' + turn_event_action_source_model_json['action'] = 'testString' + turn_event_action_source_model_json['action_title'] = 'testString' + turn_event_action_source_model_json['condition'] = 'testString' + + # Construct a model instance of TurnEventActionSource by calling from_dict on the json representation + turn_event_action_source_model = TurnEventActionSource.from_dict(turn_event_action_source_model_json) + assert turn_event_action_source_model != False + + # Construct a model instance of TurnEventActionSource by calling from_dict on the json representation + turn_event_action_source_model_dict = TurnEventActionSource.from_dict(turn_event_action_source_model_json).__dict__ + turn_event_action_source_model2 = TurnEventActionSource(**turn_event_action_source_model_dict) + + # Verify the model instances are equivalent + assert turn_event_action_source_model == turn_event_action_source_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_action_source_model_json2 = turn_event_action_source_model.to_dict() + assert turn_event_action_source_model_json2 == turn_event_action_source_model_json + + +class TestModel_TurnEventCalloutCallout: + """ + Test Class for TurnEventCalloutCallout + """ + + def test_turn_event_callout_callout_serialization(self): + """ + Test serialization/deserialization for TurnEventCalloutCallout + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_callout_callout_request_model = {} # TurnEventCalloutCalloutRequest + turn_event_callout_callout_request_model['method'] = 'get' + turn_event_callout_callout_request_model['url'] = 'testString' + turn_event_callout_callout_request_model['path'] = 'testString' + turn_event_callout_callout_request_model['query_parameters'] = 'testString' + turn_event_callout_callout_request_model['headers'] = {'anyKey': 'anyValue'} + turn_event_callout_callout_request_model['body'] = {'anyKey': 'anyValue'} + + turn_event_callout_callout_response_model = {} # TurnEventCalloutCalloutResponse + turn_event_callout_callout_response_model['body'] = 'testString' + turn_event_callout_callout_response_model['status_code'] = 38 + turn_event_callout_callout_response_model['last_event'] = {'anyKey': 'anyValue'} + + # Construct a json representation of a TurnEventCalloutCallout model + turn_event_callout_callout_model_json = {} + turn_event_callout_callout_model_json['type'] = 'integration_interaction' + turn_event_callout_callout_model_json['internal'] = {'anyKey': 'anyValue'} + turn_event_callout_callout_model_json['result_variable'] = 'testString' + turn_event_callout_callout_model_json['request'] = turn_event_callout_callout_request_model + turn_event_callout_callout_model_json['response'] = turn_event_callout_callout_response_model + + # Construct a model instance of TurnEventCalloutCallout by calling from_dict on the json representation + turn_event_callout_callout_model = TurnEventCalloutCallout.from_dict(turn_event_callout_callout_model_json) + assert turn_event_callout_callout_model != False + + # Construct a model instance of TurnEventCalloutCallout by calling from_dict on the json representation + turn_event_callout_callout_model_dict = TurnEventCalloutCallout.from_dict(turn_event_callout_callout_model_json).__dict__ + turn_event_callout_callout_model2 = TurnEventCalloutCallout(**turn_event_callout_callout_model_dict) + + # Verify the model instances are equivalent + assert turn_event_callout_callout_model == turn_event_callout_callout_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_callout_callout_model_json2 = turn_event_callout_callout_model.to_dict() + assert turn_event_callout_callout_model_json2 == turn_event_callout_callout_model_json + + +class TestModel_TurnEventCalloutCalloutRequest: + """ + Test Class for TurnEventCalloutCalloutRequest + """ + + def test_turn_event_callout_callout_request_serialization(self): + """ + Test serialization/deserialization for TurnEventCalloutCalloutRequest + """ + + # Construct a json representation of a TurnEventCalloutCalloutRequest model + turn_event_callout_callout_request_model_json = {} + turn_event_callout_callout_request_model_json['method'] = 'get' + turn_event_callout_callout_request_model_json['url'] = 'testString' + turn_event_callout_callout_request_model_json['path'] = 'testString' + turn_event_callout_callout_request_model_json['query_parameters'] = 'testString' + turn_event_callout_callout_request_model_json['headers'] = {'anyKey': 'anyValue'} + turn_event_callout_callout_request_model_json['body'] = {'anyKey': 'anyValue'} + + # Construct a model instance of TurnEventCalloutCalloutRequest by calling from_dict on the json representation + turn_event_callout_callout_request_model = TurnEventCalloutCalloutRequest.from_dict(turn_event_callout_callout_request_model_json) + assert turn_event_callout_callout_request_model != False + + # Construct a model instance of TurnEventCalloutCalloutRequest by 
calling from_dict on the json representation + turn_event_callout_callout_request_model_dict = TurnEventCalloutCalloutRequest.from_dict(turn_event_callout_callout_request_model_json).__dict__ + turn_event_callout_callout_request_model2 = TurnEventCalloutCalloutRequest(**turn_event_callout_callout_request_model_dict) + + # Verify the model instances are equivalent + assert turn_event_callout_callout_request_model == turn_event_callout_callout_request_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_callout_callout_request_model_json2 = turn_event_callout_callout_request_model.to_dict() + assert turn_event_callout_callout_request_model_json2 == turn_event_callout_callout_request_model_json + + +class TestModel_TurnEventCalloutCalloutResponse: + """ + Test Class for TurnEventCalloutCalloutResponse + """ + + def test_turn_event_callout_callout_response_serialization(self): + """ + Test serialization/deserialization for TurnEventCalloutCalloutResponse + """ + + # Construct a json representation of a TurnEventCalloutCalloutResponse model + turn_event_callout_callout_response_model_json = {} + turn_event_callout_callout_response_model_json['body'] = 'testString' + turn_event_callout_callout_response_model_json['status_code'] = 38 + turn_event_callout_callout_response_model_json['last_event'] = {'anyKey': 'anyValue'} + + # Construct a model instance of TurnEventCalloutCalloutResponse by calling from_dict on the json representation + turn_event_callout_callout_response_model = TurnEventCalloutCalloutResponse.from_dict(turn_event_callout_callout_response_model_json) + assert turn_event_callout_callout_response_model != False + + # Construct a model instance of TurnEventCalloutCalloutResponse by calling from_dict on the json representation + turn_event_callout_callout_response_model_dict = TurnEventCalloutCalloutResponse.from_dict(turn_event_callout_callout_response_model_json).__dict__ + turn_event_callout_callout_response_model2 = TurnEventCalloutCalloutResponse(**turn_event_callout_callout_response_model_dict) + + # Verify the model instances are equivalent + assert turn_event_callout_callout_response_model == turn_event_callout_callout_response_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_callout_callout_response_model_json2 = turn_event_callout_callout_response_model.to_dict() + assert turn_event_callout_callout_response_model_json2 == turn_event_callout_callout_response_model_json + + +class TestModel_TurnEventCalloutError: + """ + Test Class for TurnEventCalloutError + """ + + def test_turn_event_callout_error_serialization(self): + """ + Test serialization/deserialization for TurnEventCalloutError + """ + + # Construct a json representation of a TurnEventCalloutError model + turn_event_callout_error_model_json = {} + turn_event_callout_error_model_json['message'] = 'testString' + + # Construct a model instance of TurnEventCalloutError by calling from_dict on the json representation + turn_event_callout_error_model = TurnEventCalloutError.from_dict(turn_event_callout_error_model_json) + assert turn_event_callout_error_model != False + + # Construct a model instance of TurnEventCalloutError by calling from_dict on the json representation + turn_event_callout_error_model_dict = TurnEventCalloutError.from_dict(turn_event_callout_error_model_json).__dict__ + turn_event_callout_error_model2 = TurnEventCalloutError(**turn_event_callout_error_model_dict) + + # Verify the model instances are equivalent + assert 
turn_event_callout_error_model == turn_event_callout_error_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_callout_error_model_json2 = turn_event_callout_error_model.to_dict() + assert turn_event_callout_error_model_json2 == turn_event_callout_error_model_json + + +class TestModel_TurnEventGenerativeAICalledCallout: + """ + Test Class for TurnEventGenerativeAICalledCallout + """ + + def test_turn_event_generative_ai_called_callout_serialization(self): + """ + Test serialization/deserialization for TurnEventGenerativeAICalledCallout + """ + + # Construct dict forms of any model objects needed in order to build this model. + + turn_event_generative_ai_called_callout_request_model = {} # TurnEventGenerativeAICalledCalloutRequest + turn_event_generative_ai_called_callout_request_model['method'] = 'GET' + turn_event_generative_ai_called_callout_request_model['url'] = 'testString' + turn_event_generative_ai_called_callout_request_model['port'] = 'testString' + turn_event_generative_ai_called_callout_request_model['path'] = 'testString' + turn_event_generative_ai_called_callout_request_model['query_parameters'] = 'testString' + turn_event_generative_ai_called_callout_request_model['headers'] = {'anyKey': 'anyValue'} + turn_event_generative_ai_called_callout_request_model['body'] = {'anyKey': 'anyValue'} + + turn_event_generative_ai_called_callout_response_model = {} # TurnEventGenerativeAICalledCalloutResponse + turn_event_generative_ai_called_callout_response_model['body'] = 'testString' + turn_event_generative_ai_called_callout_response_model['status_code'] = 38 + + turn_event_generative_ai_called_callout_search_model = {} # TurnEventGenerativeAICalledCalloutSearch + turn_event_generative_ai_called_callout_search_model['engine'] = 'testString' + turn_event_generative_ai_called_callout_search_model['index'] = 'testString' + turn_event_generative_ai_called_callout_search_model['query'] = 'testString' + turn_event_generative_ai_called_callout_search_model['request'] = turn_event_generative_ai_called_callout_request_model + turn_event_generative_ai_called_callout_search_model['response'] = turn_event_generative_ai_called_callout_response_model + + turn_event_generative_ai_called_callout_llm_response_model = {} # TurnEventGenerativeAICalledCalloutLlmResponse + turn_event_generative_ai_called_callout_llm_response_model['text'] = 'testString' + turn_event_generative_ai_called_callout_llm_response_model['response_type'] = 'testString' + turn_event_generative_ai_called_callout_llm_response_model['is_idk_response'] = True + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + turn_event_generative_ai_called_callout_llm_model = {} # TurnEventGenerativeAICalledCalloutLlm + turn_event_generative_ai_called_callout_llm_model['type'] = 'testString' + turn_event_generative_ai_called_callout_llm_model['model_id'] = 'testString' + turn_event_generative_ai_called_callout_llm_model['model_class_id'] = 'testString' + turn_event_generative_ai_called_callout_llm_model['generated_token_count'] = 38 + 
turn_event_generative_ai_called_callout_llm_model['input_token_count'] = 38 + turn_event_generative_ai_called_callout_llm_model['success'] = True + turn_event_generative_ai_called_callout_llm_model['response'] = turn_event_generative_ai_called_callout_llm_response_model + turn_event_generative_ai_called_callout_llm_model['request'] = [search_results_model] + + # Construct a json representation of a TurnEventGenerativeAICalledCallout model + turn_event_generative_ai_called_callout_model_json = {} + turn_event_generative_ai_called_callout_model_json['search_called'] = True + turn_event_generative_ai_called_callout_model_json['llm_called'] = True + turn_event_generative_ai_called_callout_model_json['search'] = turn_event_generative_ai_called_callout_search_model + turn_event_generative_ai_called_callout_model_json['llm'] = turn_event_generative_ai_called_callout_llm_model + turn_event_generative_ai_called_callout_model_json['idk_reason_code'] = 'testString' + + # Construct a model instance of TurnEventGenerativeAICalledCallout by calling from_dict on the json representation + turn_event_generative_ai_called_callout_model = TurnEventGenerativeAICalledCallout.from_dict(turn_event_generative_ai_called_callout_model_json) + assert turn_event_generative_ai_called_callout_model != False + + # Construct a model instance of TurnEventGenerativeAICalledCallout by calling from_dict on the json representation + turn_event_generative_ai_called_callout_model_dict = TurnEventGenerativeAICalledCallout.from_dict(turn_event_generative_ai_called_callout_model_json).__dict__ + turn_event_generative_ai_called_callout_model2 = TurnEventGenerativeAICalledCallout(**turn_event_generative_ai_called_callout_model_dict) + + # Verify the model instances are equivalent + assert turn_event_generative_ai_called_callout_model == turn_event_generative_ai_called_callout_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_generative_ai_called_callout_model_json2 = turn_event_generative_ai_called_callout_model.to_dict() + assert turn_event_generative_ai_called_callout_model_json2 == turn_event_generative_ai_called_callout_model_json + + +class TestModel_TurnEventGenerativeAICalledCalloutLlm: + """ + Test Class for TurnEventGenerativeAICalledCalloutLlm + """ + + def test_turn_event_generative_ai_called_callout_llm_serialization(self): + """ + Test serialization/deserialization for TurnEventGenerativeAICalledCalloutLlm + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_generative_ai_called_callout_llm_response_model = {} # TurnEventGenerativeAICalledCalloutLlmResponse + turn_event_generative_ai_called_callout_llm_response_model['text'] = 'testString' + turn_event_generative_ai_called_callout_llm_response_model['response_type'] = 'testString' + turn_event_generative_ai_called_callout_llm_response_model['is_idk_response'] = True + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + # Construct a json representation of a TurnEventGenerativeAICalledCalloutLlm model + turn_event_generative_ai_called_callout_llm_model_json = {} + turn_event_generative_ai_called_callout_llm_model_json['type'] = 'testString' + turn_event_generative_ai_called_callout_llm_model_json['model_id'] = 'testString' + turn_event_generative_ai_called_callout_llm_model_json['model_class_id'] = 'testString' + turn_event_generative_ai_called_callout_llm_model_json['generated_token_count'] = 38 + turn_event_generative_ai_called_callout_llm_model_json['input_token_count'] = 38 + turn_event_generative_ai_called_callout_llm_model_json['success'] = True + turn_event_generative_ai_called_callout_llm_model_json['response'] = turn_event_generative_ai_called_callout_llm_response_model + turn_event_generative_ai_called_callout_llm_model_json['request'] = [search_results_model] + + # Construct a model instance of TurnEventGenerativeAICalledCalloutLlm by calling from_dict on the json representation + turn_event_generative_ai_called_callout_llm_model = TurnEventGenerativeAICalledCalloutLlm.from_dict(turn_event_generative_ai_called_callout_llm_model_json) + assert turn_event_generative_ai_called_callout_llm_model != False + + # Construct a model instance of TurnEventGenerativeAICalledCalloutLlm by calling from_dict on the json representation + turn_event_generative_ai_called_callout_llm_model_dict = TurnEventGenerativeAICalledCalloutLlm.from_dict(turn_event_generative_ai_called_callout_llm_model_json).__dict__ + turn_event_generative_ai_called_callout_llm_model2 = TurnEventGenerativeAICalledCalloutLlm(**turn_event_generative_ai_called_callout_llm_model_dict) + + # Verify the model instances are equivalent + assert turn_event_generative_ai_called_callout_llm_model == turn_event_generative_ai_called_callout_llm_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_generative_ai_called_callout_llm_model_json2 = turn_event_generative_ai_called_callout_llm_model.to_dict() + assert turn_event_generative_ai_called_callout_llm_model_json2 == turn_event_generative_ai_called_callout_llm_model_json + + +class TestModel_TurnEventGenerativeAICalledCalloutLlmResponse: + """ + Test Class for TurnEventGenerativeAICalledCalloutLlmResponse + """ + + def test_turn_event_generative_ai_called_callout_llm_response_serialization(self): + """ + Test serialization/deserialization for TurnEventGenerativeAICalledCalloutLlmResponse + """ + + # Construct a json representation of a TurnEventGenerativeAICalledCalloutLlmResponse model + turn_event_generative_ai_called_callout_llm_response_model_json = {} + 
turn_event_generative_ai_called_callout_llm_response_model_json['text'] = 'testString' + turn_event_generative_ai_called_callout_llm_response_model_json['response_type'] = 'testString' + turn_event_generative_ai_called_callout_llm_response_model_json['is_idk_response'] = True + + # Construct a model instance of TurnEventGenerativeAICalledCalloutLlmResponse by calling from_dict on the json representation + turn_event_generative_ai_called_callout_llm_response_model = TurnEventGenerativeAICalledCalloutLlmResponse.from_dict(turn_event_generative_ai_called_callout_llm_response_model_json) + assert turn_event_generative_ai_called_callout_llm_response_model != False + + # Construct a model instance of TurnEventGenerativeAICalledCalloutLlmResponse by calling from_dict on the json representation + turn_event_generative_ai_called_callout_llm_response_model_dict = TurnEventGenerativeAICalledCalloutLlmResponse.from_dict(turn_event_generative_ai_called_callout_llm_response_model_json).__dict__ + turn_event_generative_ai_called_callout_llm_response_model2 = TurnEventGenerativeAICalledCalloutLlmResponse(**turn_event_generative_ai_called_callout_llm_response_model_dict) + + # Verify the model instances are equivalent + assert turn_event_generative_ai_called_callout_llm_response_model == turn_event_generative_ai_called_callout_llm_response_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_generative_ai_called_callout_llm_response_model_json2 = turn_event_generative_ai_called_callout_llm_response_model.to_dict() + assert turn_event_generative_ai_called_callout_llm_response_model_json2 == turn_event_generative_ai_called_callout_llm_response_model_json + + +class TestModel_TurnEventGenerativeAICalledCalloutRequest: + """ + Test Class for TurnEventGenerativeAICalledCalloutRequest + """ + + def test_turn_event_generative_ai_called_callout_request_serialization(self): + """ + Test serialization/deserialization for TurnEventGenerativeAICalledCalloutRequest + """ + + # Construct a json representation of a TurnEventGenerativeAICalledCalloutRequest model + turn_event_generative_ai_called_callout_request_model_json = {} + turn_event_generative_ai_called_callout_request_model_json['method'] = 'GET' + turn_event_generative_ai_called_callout_request_model_json['url'] = 'testString' + turn_event_generative_ai_called_callout_request_model_json['port'] = 'testString' + turn_event_generative_ai_called_callout_request_model_json['path'] = 'testString' + turn_event_generative_ai_called_callout_request_model_json['query_parameters'] = 'testString' + turn_event_generative_ai_called_callout_request_model_json['headers'] = {'anyKey': 'anyValue'} + turn_event_generative_ai_called_callout_request_model_json['body'] = {'anyKey': 'anyValue'} + + # Construct a model instance of TurnEventGenerativeAICalledCalloutRequest by calling from_dict on the json representation + turn_event_generative_ai_called_callout_request_model = TurnEventGenerativeAICalledCalloutRequest.from_dict(turn_event_generative_ai_called_callout_request_model_json) + assert turn_event_generative_ai_called_callout_request_model != False + + # Construct a model instance of TurnEventGenerativeAICalledCalloutRequest by calling from_dict on the json representation + turn_event_generative_ai_called_callout_request_model_dict = TurnEventGenerativeAICalledCalloutRequest.from_dict(turn_event_generative_ai_called_callout_request_model_json).__dict__ + turn_event_generative_ai_called_callout_request_model2 = 
TurnEventGenerativeAICalledCalloutRequest(**turn_event_generative_ai_called_callout_request_model_dict) + + # Verify the model instances are equivalent + assert turn_event_generative_ai_called_callout_request_model == turn_event_generative_ai_called_callout_request_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_generative_ai_called_callout_request_model_json2 = turn_event_generative_ai_called_callout_request_model.to_dict() + assert turn_event_generative_ai_called_callout_request_model_json2 == turn_event_generative_ai_called_callout_request_model_json + + +class TestModel_TurnEventGenerativeAICalledCalloutResponse: + """ + Test Class for TurnEventGenerativeAICalledCalloutResponse + """ + + def test_turn_event_generative_ai_called_callout_response_serialization(self): + """ + Test serialization/deserialization for TurnEventGenerativeAICalledCalloutResponse + """ + + # Construct a json representation of a TurnEventGenerativeAICalledCalloutResponse model + turn_event_generative_ai_called_callout_response_model_json = {} + turn_event_generative_ai_called_callout_response_model_json['body'] = 'testString' + turn_event_generative_ai_called_callout_response_model_json['status_code'] = 38 + + # Construct a model instance of TurnEventGenerativeAICalledCalloutResponse by calling from_dict on the json representation + turn_event_generative_ai_called_callout_response_model = TurnEventGenerativeAICalledCalloutResponse.from_dict(turn_event_generative_ai_called_callout_response_model_json) + assert turn_event_generative_ai_called_callout_response_model != False + + # Construct a model instance of TurnEventGenerativeAICalledCalloutResponse by calling from_dict on the json representation + turn_event_generative_ai_called_callout_response_model_dict = TurnEventGenerativeAICalledCalloutResponse.from_dict(turn_event_generative_ai_called_callout_response_model_json).__dict__ + turn_event_generative_ai_called_callout_response_model2 = TurnEventGenerativeAICalledCalloutResponse(**turn_event_generative_ai_called_callout_response_model_dict) + + # Verify the model instances are equivalent + assert turn_event_generative_ai_called_callout_response_model == turn_event_generative_ai_called_callout_response_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_generative_ai_called_callout_response_model_json2 = turn_event_generative_ai_called_callout_response_model.to_dict() + assert turn_event_generative_ai_called_callout_response_model_json2 == turn_event_generative_ai_called_callout_response_model_json + + +class TestModel_TurnEventGenerativeAICalledCalloutSearch: + """ + Test Class for TurnEventGenerativeAICalledCalloutSearch + """ + + def test_turn_event_generative_ai_called_callout_search_serialization(self): + """ + Test serialization/deserialization for TurnEventGenerativeAICalledCalloutSearch + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_generative_ai_called_callout_request_model = {} # TurnEventGenerativeAICalledCalloutRequest + turn_event_generative_ai_called_callout_request_model['method'] = 'GET' + turn_event_generative_ai_called_callout_request_model['url'] = 'testString' + turn_event_generative_ai_called_callout_request_model['port'] = 'testString' + turn_event_generative_ai_called_callout_request_model['path'] = 'testString' + turn_event_generative_ai_called_callout_request_model['query_parameters'] = 'testString' + turn_event_generative_ai_called_callout_request_model['headers'] = {'anyKey': 'anyValue'} + turn_event_generative_ai_called_callout_request_model['body'] = {'anyKey': 'anyValue'} + + turn_event_generative_ai_called_callout_response_model = {} # TurnEventGenerativeAICalledCalloutResponse + turn_event_generative_ai_called_callout_response_model['body'] = 'testString' + turn_event_generative_ai_called_callout_response_model['status_code'] = 38 + + # Construct a json representation of a TurnEventGenerativeAICalledCalloutSearch model + turn_event_generative_ai_called_callout_search_model_json = {} + turn_event_generative_ai_called_callout_search_model_json['engine'] = 'testString' + turn_event_generative_ai_called_callout_search_model_json['index'] = 'testString' + turn_event_generative_ai_called_callout_search_model_json['query'] = 'testString' + turn_event_generative_ai_called_callout_search_model_json['request'] = turn_event_generative_ai_called_callout_request_model + turn_event_generative_ai_called_callout_search_model_json['response'] = turn_event_generative_ai_called_callout_response_model + + # Construct a model instance of TurnEventGenerativeAICalledCalloutSearch by calling from_dict on the json representation + turn_event_generative_ai_called_callout_search_model = TurnEventGenerativeAICalledCalloutSearch.from_dict(turn_event_generative_ai_called_callout_search_model_json) + assert turn_event_generative_ai_called_callout_search_model != False + + # Construct a model instance of TurnEventGenerativeAICalledCalloutSearch by calling from_dict on the json representation + turn_event_generative_ai_called_callout_search_model_dict = TurnEventGenerativeAICalledCalloutSearch.from_dict(turn_event_generative_ai_called_callout_search_model_json).__dict__ + turn_event_generative_ai_called_callout_search_model2 = TurnEventGenerativeAICalledCalloutSearch(**turn_event_generative_ai_called_callout_search_model_dict) + + # Verify the model instances are equivalent + assert turn_event_generative_ai_called_callout_search_model == turn_event_generative_ai_called_callout_search_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_generative_ai_called_callout_search_model_json2 = turn_event_generative_ai_called_callout_search_model.to_dict() + assert turn_event_generative_ai_called_callout_search_model_json2 == turn_event_generative_ai_called_callout_search_model_json + + +class TestModel_TurnEventGenerativeAICalledMetrics: + """ + Test Class for TurnEventGenerativeAICalledMetrics + """ + + def test_turn_event_generative_ai_called_metrics_serialization(self): + """ + Test serialization/deserialization for TurnEventGenerativeAICalledMetrics + """ + + # Construct a json representation of a TurnEventGenerativeAICalledMetrics model + turn_event_generative_ai_called_metrics_model_json = {} + turn_event_generative_ai_called_metrics_model_json['search_time_ms'] = 72.5 + turn_event_generative_ai_called_metrics_model_json['answer_generation_time_ms'] = 72.5 + 
turn_event_generative_ai_called_metrics_model_json['total_time_ms'] = 72.5 + + # Construct a model instance of TurnEventGenerativeAICalledMetrics by calling from_dict on the json representation + turn_event_generative_ai_called_metrics_model = TurnEventGenerativeAICalledMetrics.from_dict(turn_event_generative_ai_called_metrics_model_json) + assert turn_event_generative_ai_called_metrics_model != False + + # Construct a model instance of TurnEventGenerativeAICalledMetrics by calling from_dict on the json representation + turn_event_generative_ai_called_metrics_model_dict = TurnEventGenerativeAICalledMetrics.from_dict(turn_event_generative_ai_called_metrics_model_json).__dict__ + turn_event_generative_ai_called_metrics_model2 = TurnEventGenerativeAICalledMetrics(**turn_event_generative_ai_called_metrics_model_dict) + + # Verify the model instances are equivalent + assert turn_event_generative_ai_called_metrics_model == turn_event_generative_ai_called_metrics_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_generative_ai_called_metrics_model_json2 = turn_event_generative_ai_called_metrics_model.to_dict() + assert turn_event_generative_ai_called_metrics_model_json2 == turn_event_generative_ai_called_metrics_model_json + + +class TestModel_TurnEventNodeSource: + """ + Test Class for TurnEventNodeSource + """ + + def test_turn_event_node_source_serialization(self): + """ + Test serialization/deserialization for TurnEventNodeSource + """ + + # Construct a json representation of a TurnEventNodeSource model + turn_event_node_source_model_json = {} + turn_event_node_source_model_json['type'] = 'dialog_node' + turn_event_node_source_model_json['dialog_node'] = 'testString' + turn_event_node_source_model_json['title'] = 'testString' + turn_event_node_source_model_json['condition'] = 'testString' + + # Construct a model instance of TurnEventNodeSource by calling from_dict on the json representation + turn_event_node_source_model = TurnEventNodeSource.from_dict(turn_event_node_source_model_json) + assert turn_event_node_source_model != False + + # Construct a model instance of TurnEventNodeSource by calling from_dict on the json representation + turn_event_node_source_model_dict = TurnEventNodeSource.from_dict(turn_event_node_source_model_json).__dict__ + turn_event_node_source_model2 = TurnEventNodeSource(**turn_event_node_source_model_dict) + + # Verify the model instances are equivalent + assert turn_event_node_source_model == turn_event_node_source_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_node_source_model_json2 = turn_event_node_source_model.to_dict() + assert turn_event_node_source_model_json2 == turn_event_node_source_model_json + + +class TestModel_TurnEventSearchError: + """ + Test Class for TurnEventSearchError + """ + + def test_turn_event_search_error_serialization(self): + """ + Test serialization/deserialization for TurnEventSearchError + """ + + # Construct a json representation of a TurnEventSearchError model + turn_event_search_error_model_json = {} + turn_event_search_error_model_json['message'] = 'testString' + + # Construct a model instance of TurnEventSearchError by calling from_dict on the json representation + turn_event_search_error_model = TurnEventSearchError.from_dict(turn_event_search_error_model_json) + assert turn_event_search_error_model != False + + # Construct a model instance of TurnEventSearchError by calling from_dict on the json representation + turn_event_search_error_model_dict 
= TurnEventSearchError.from_dict(turn_event_search_error_model_json).__dict__ + turn_event_search_error_model2 = TurnEventSearchError(**turn_event_search_error_model_dict) + + # Verify the model instances are equivalent + assert turn_event_search_error_model == turn_event_search_error_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_search_error_model_json2 = turn_event_search_error_model.to_dict() + assert turn_event_search_error_model_json2 == turn_event_search_error_model_json + + +class TestModel_TurnEventStepSource: + """ + Test Class for TurnEventStepSource + """ + + def test_turn_event_step_source_serialization(self): + """ + Test serialization/deserialization for TurnEventStepSource + """ + + # Construct a json representation of a TurnEventStepSource model + turn_event_step_source_model_json = {} + turn_event_step_source_model_json['type'] = 'step' + turn_event_step_source_model_json['action'] = 'testString' + turn_event_step_source_model_json['action_title'] = 'testString' + turn_event_step_source_model_json['step'] = 'testString' + turn_event_step_source_model_json['is_ai_guided'] = True + turn_event_step_source_model_json['is_skill_based'] = True + + # Construct a model instance of TurnEventStepSource by calling from_dict on the json representation + turn_event_step_source_model = TurnEventStepSource.from_dict(turn_event_step_source_model_json) + assert turn_event_step_source_model != False + + # Construct a model instance of TurnEventStepSource by calling from_dict on the json representation + turn_event_step_source_model_dict = TurnEventStepSource.from_dict(turn_event_step_source_model_json).__dict__ + turn_event_step_source_model2 = TurnEventStepSource(**turn_event_step_source_model_dict) + + # Verify the model instances are equivalent + assert turn_event_step_source_model == turn_event_step_source_model2 + + # Convert model instance back to dict and verify no loss of data + turn_event_step_source_model_json2 = turn_event_step_source_model.to_dict() + assert turn_event_step_source_model_json2 == turn_event_step_source_model_json + + +class TestModel_UpdateEnvironmentOrchestration: + """ + Test Class for UpdateEnvironmentOrchestration + """ + + def test_update_environment_orchestration_serialization(self): + """ + Test serialization/deserialization for UpdateEnvironmentOrchestration + """ + + # Construct a json representation of a UpdateEnvironmentOrchestration model + update_environment_orchestration_model_json = {} + update_environment_orchestration_model_json['search_skill_fallback'] = True + + # Construct a model instance of UpdateEnvironmentOrchestration by calling from_dict on the json representation + update_environment_orchestration_model = UpdateEnvironmentOrchestration.from_dict(update_environment_orchestration_model_json) + assert update_environment_orchestration_model != False + + # Construct a model instance of UpdateEnvironmentOrchestration by calling from_dict on the json representation + update_environment_orchestration_model_dict = UpdateEnvironmentOrchestration.from_dict(update_environment_orchestration_model_json).__dict__ + update_environment_orchestration_model2 = UpdateEnvironmentOrchestration(**update_environment_orchestration_model_dict) + + # Verify the model instances are equivalent + assert update_environment_orchestration_model == update_environment_orchestration_model2 + + # Convert model instance back to dict and verify no loss of data + update_environment_orchestration_model_json2 = 
update_environment_orchestration_model.to_dict() + assert update_environment_orchestration_model_json2 == update_environment_orchestration_model_json + + +class TestModel_UpdateEnvironmentReleaseReference: + """ + Test Class for UpdateEnvironmentReleaseReference + """ + + def test_update_environment_release_reference_serialization(self): + """ + Test serialization/deserialization for UpdateEnvironmentReleaseReference + """ + + # Construct a json representation of a UpdateEnvironmentReleaseReference model + update_environment_release_reference_model_json = {} + update_environment_release_reference_model_json['release'] = 'testString' + + # Construct a model instance of UpdateEnvironmentReleaseReference by calling from_dict on the json representation + update_environment_release_reference_model = UpdateEnvironmentReleaseReference.from_dict(update_environment_release_reference_model_json) + assert update_environment_release_reference_model != False + + # Construct a model instance of UpdateEnvironmentReleaseReference by calling from_dict on the json representation + update_environment_release_reference_model_dict = UpdateEnvironmentReleaseReference.from_dict(update_environment_release_reference_model_json).__dict__ + update_environment_release_reference_model2 = UpdateEnvironmentReleaseReference(**update_environment_release_reference_model_dict) + + # Verify the model instances are equivalent + assert update_environment_release_reference_model == update_environment_release_reference_model2 + + # Convert model instance back to dict and verify no loss of data + update_environment_release_reference_model_json2 = update_environment_release_reference_model.to_dict() + assert update_environment_release_reference_model_json2 == update_environment_release_reference_model_json + + +class TestModel_CompleteItem: + """ + Test Class for CompleteItem + """ + + def test_complete_item_serialization(self): + """ + Test serialization/deserialization for CompleteItem + """ + + # Construct dict forms of any model objects needed in order to build this model. + + metadata_model = {} # Metadata + metadata_model['id'] = 38 + + # Construct a json representation of a CompleteItem model + complete_item_model_json = {} + complete_item_model_json['streaming_metadata'] = metadata_model + + # Construct a model instance of CompleteItem by calling from_dict on the json representation + complete_item_model = CompleteItem.from_dict(complete_item_model_json) + assert complete_item_model != False + + # Construct a model instance of CompleteItem by calling from_dict on the json representation + complete_item_model_dict = CompleteItem.from_dict(complete_item_model_json).__dict__ + complete_item_model2 = CompleteItem(**complete_item_model_dict) + + # Verify the model instances are equivalent + assert complete_item_model == complete_item_model2 + + # Convert model instance back to dict and verify no loss of data + complete_item_model_json2 = complete_item_model.to_dict() + assert complete_item_model_json2 == complete_item_model_json + + +class TestModel_GenerativeAITaskContentGroundedAnswering: + """ + Test Class for GenerativeAITaskContentGroundedAnswering + """ + + def test_generative_ai_task_content_grounded_answering_serialization(self): + """ + Test serialization/deserialization for GenerativeAITaskContentGroundedAnswering + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + generative_ai_task_confidence_scores_model = {} # GenerativeAITaskConfidenceScores + generative_ai_task_confidence_scores_model['pre_gen'] = 72.5 + generative_ai_task_confidence_scores_model['pre_gen_threshold'] = 72.5 + generative_ai_task_confidence_scores_model['post_gen'] = 72.5 + generative_ai_task_confidence_scores_model['post_gen_threshold'] = 72.5 + + # Construct a json representation of a GenerativeAITaskContentGroundedAnswering model + generative_ai_task_content_grounded_answering_model_json = {} + generative_ai_task_content_grounded_answering_model_json['task'] = 'content_grounded_answering' + generative_ai_task_content_grounded_answering_model_json['is_idk_response'] = True + generative_ai_task_content_grounded_answering_model_json['is_hap_detected'] = True + generative_ai_task_content_grounded_answering_model_json['confidence_scores'] = generative_ai_task_confidence_scores_model + generative_ai_task_content_grounded_answering_model_json['original_response'] = 'testString' + generative_ai_task_content_grounded_answering_model_json['inferred_query'] = 'testString' + + # Construct a model instance of GenerativeAITaskContentGroundedAnswering by calling from_dict on the json representation + generative_ai_task_content_grounded_answering_model = GenerativeAITaskContentGroundedAnswering.from_dict(generative_ai_task_content_grounded_answering_model_json) + assert generative_ai_task_content_grounded_answering_model != False + + # Construct a model instance of GenerativeAITaskContentGroundedAnswering by calling from_dict on the json representation + generative_ai_task_content_grounded_answering_model_dict = GenerativeAITaskContentGroundedAnswering.from_dict(generative_ai_task_content_grounded_answering_model_json).__dict__ + generative_ai_task_content_grounded_answering_model2 = GenerativeAITaskContentGroundedAnswering(**generative_ai_task_content_grounded_answering_model_dict) + + # Verify the model instances are equivalent + assert generative_ai_task_content_grounded_answering_model == generative_ai_task_content_grounded_answering_model2 + + # Convert model instance back to dict and verify no loss of data + generative_ai_task_content_grounded_answering_model_json2 = generative_ai_task_content_grounded_answering_model.to_dict() + assert generative_ai_task_content_grounded_answering_model_json2 == generative_ai_task_content_grounded_answering_model_json + + +class TestModel_GenerativeAITaskGeneralPurposeAnswering: + """ + Test Class for GenerativeAITaskGeneralPurposeAnswering + """ + + def test_generative_ai_task_general_purpose_answering_serialization(self): + """ + Test serialization/deserialization for GenerativeAITaskGeneralPurposeAnswering + """ + + # Construct a json representation of a GenerativeAITaskGeneralPurposeAnswering model + generative_ai_task_general_purpose_answering_model_json = {} + generative_ai_task_general_purpose_answering_model_json['task'] = 'general_purpose_answering' + generative_ai_task_general_purpose_answering_model_json['is_idk_response'] = True + generative_ai_task_general_purpose_answering_model_json['is_hap_detected'] = True + + # Construct a model instance of GenerativeAITaskGeneralPurposeAnswering by calling from_dict on the json representation + generative_ai_task_general_purpose_answering_model = GenerativeAITaskGeneralPurposeAnswering.from_dict(generative_ai_task_general_purpose_answering_model_json) + assert generative_ai_task_general_purpose_answering_model != False + + # Construct a model instance of GenerativeAITaskGeneralPurposeAnswering by 
calling from_dict on the json representation + generative_ai_task_general_purpose_answering_model_dict = GenerativeAITaskGeneralPurposeAnswering.from_dict(generative_ai_task_general_purpose_answering_model_json).__dict__ + generative_ai_task_general_purpose_answering_model2 = GenerativeAITaskGeneralPurposeAnswering(**generative_ai_task_general_purpose_answering_model_dict) + + # Verify the model instances are equivalent + assert generative_ai_task_general_purpose_answering_model == generative_ai_task_general_purpose_answering_model2 + + # Convert model instance back to dict and verify no loss of data + generative_ai_task_general_purpose_answering_model_json2 = generative_ai_task_general_purpose_answering_model.to_dict() + assert generative_ai_task_general_purpose_answering_model_json2 == generative_ai_task_general_purpose_answering_model_json + + +class TestModel_LogMessageSourceAction: + """ + Test Class for LogMessageSourceAction + """ + + def test_log_message_source_action_serialization(self): + """ + Test serialization/deserialization for LogMessageSourceAction + """ + + # Construct a json representation of a LogMessageSourceAction model + log_message_source_action_model_json = {} + log_message_source_action_model_json['type'] = 'action' + log_message_source_action_model_json['action'] = 'testString' + + # Construct a model instance of LogMessageSourceAction by calling from_dict on the json representation + log_message_source_action_model = LogMessageSourceAction.from_dict(log_message_source_action_model_json) + assert log_message_source_action_model != False + + # Construct a model instance of LogMessageSourceAction by calling from_dict on the json representation + log_message_source_action_model_dict = LogMessageSourceAction.from_dict(log_message_source_action_model_json).__dict__ + log_message_source_action_model2 = LogMessageSourceAction(**log_message_source_action_model_dict) + + # Verify the model instances are equivalent + assert log_message_source_action_model == log_message_source_action_model2 + + # Convert model instance back to dict and verify no loss of data + log_message_source_action_model_json2 = log_message_source_action_model.to_dict() + assert log_message_source_action_model_json2 == log_message_source_action_model_json + + +class TestModel_LogMessageSourceDialogNode: + """ + Test Class for LogMessageSourceDialogNode + """ + + def test_log_message_source_dialog_node_serialization(self): + """ + Test serialization/deserialization for LogMessageSourceDialogNode + """ + + # Construct a json representation of a LogMessageSourceDialogNode model + log_message_source_dialog_node_model_json = {} + log_message_source_dialog_node_model_json['type'] = 'dialog_node' + log_message_source_dialog_node_model_json['dialog_node'] = 'testString' + + # Construct a model instance of LogMessageSourceDialogNode by calling from_dict on the json representation + log_message_source_dialog_node_model = LogMessageSourceDialogNode.from_dict(log_message_source_dialog_node_model_json) + assert log_message_source_dialog_node_model != False + + # Construct a model instance of LogMessageSourceDialogNode by calling from_dict on the json representation + log_message_source_dialog_node_model_dict = LogMessageSourceDialogNode.from_dict(log_message_source_dialog_node_model_json).__dict__ + log_message_source_dialog_node_model2 = LogMessageSourceDialogNode(**log_message_source_dialog_node_model_dict) + + # Verify the model instances are equivalent + assert log_message_source_dialog_node_model == 
log_message_source_dialog_node_model2 + + # Convert model instance back to dict and verify no loss of data + log_message_source_dialog_node_model_json2 = log_message_source_dialog_node_model.to_dict() + assert log_message_source_dialog_node_model_json2 == log_message_source_dialog_node_model_json + + +class TestModel_LogMessageSourceHandler: + """ + Test Class for LogMessageSourceHandler + """ + + def test_log_message_source_handler_serialization(self): + """ + Test serialization/deserialization for LogMessageSourceHandler + """ + + # Construct a json representation of a LogMessageSourceHandler model + log_message_source_handler_model_json = {} + log_message_source_handler_model_json['type'] = 'handler' + log_message_source_handler_model_json['action'] = 'testString' + log_message_source_handler_model_json['step'] = 'testString' + log_message_source_handler_model_json['handler'] = 'testString' + + # Construct a model instance of LogMessageSourceHandler by calling from_dict on the json representation + log_message_source_handler_model = LogMessageSourceHandler.from_dict(log_message_source_handler_model_json) + assert log_message_source_handler_model != False + + # Construct a model instance of LogMessageSourceHandler by calling from_dict on the json representation + log_message_source_handler_model_dict = LogMessageSourceHandler.from_dict(log_message_source_handler_model_json).__dict__ + log_message_source_handler_model2 = LogMessageSourceHandler(**log_message_source_handler_model_dict) + + # Verify the model instances are equivalent + assert log_message_source_handler_model == log_message_source_handler_model2 + + # Convert model instance back to dict and verify no loss of data + log_message_source_handler_model_json2 = log_message_source_handler_model.to_dict() + assert log_message_source_handler_model_json2 == log_message_source_handler_model_json + + +class TestModel_LogMessageSourceStep: + """ + Test Class for LogMessageSourceStep + """ + + def test_log_message_source_step_serialization(self): + """ + Test serialization/deserialization for LogMessageSourceStep + """ + + # Construct a json representation of a LogMessageSourceStep model + log_message_source_step_model_json = {} + log_message_source_step_model_json['type'] = 'step' + log_message_source_step_model_json['action'] = 'testString' + log_message_source_step_model_json['step'] = 'testString' + + # Construct a model instance of LogMessageSourceStep by calling from_dict on the json representation + log_message_source_step_model = LogMessageSourceStep.from_dict(log_message_source_step_model_json) + assert log_message_source_step_model != False + + # Construct a model instance of LogMessageSourceStep by calling from_dict on the json representation + log_message_source_step_model_dict = LogMessageSourceStep.from_dict(log_message_source_step_model_json).__dict__ + log_message_source_step_model2 = LogMessageSourceStep(**log_message_source_step_model_dict) + + # Verify the model instances are equivalent + assert log_message_source_step_model == log_message_source_step_model2 + + # Convert model instance back to dict and verify no loss of data + log_message_source_step_model_json2 = log_message_source_step_model.to_dict() + assert log_message_source_step_model_json2 == log_message_source_step_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventActionFinished: + """ + Test Class for MessageOutputDebugTurnEventTurnEventActionFinished + """ + + def 
test_message_output_debug_turn_event_turn_event_action_finished_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventActionFinished + """ + + # Construct dict forms of any model objects needed in order to build this model. + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventActionFinished model + message_output_debug_turn_event_turn_event_action_finished_model_json = {} + message_output_debug_turn_event_turn_event_action_finished_model_json['event'] = 'action_finished' + message_output_debug_turn_event_turn_event_action_finished_model_json['source'] = turn_event_action_source_model + message_output_debug_turn_event_turn_event_action_finished_model_json['action_start_time'] = 'testString' + message_output_debug_turn_event_turn_event_action_finished_model_json['condition_type'] = 'user_defined' + message_output_debug_turn_event_turn_event_action_finished_model_json['reason'] = 'all_steps_done' + message_output_debug_turn_event_turn_event_action_finished_model_json['action_variables'] = {'anyKey': 'anyValue'} + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventActionFinished by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_action_finished_model = MessageOutputDebugTurnEventTurnEventActionFinished.from_dict(message_output_debug_turn_event_turn_event_action_finished_model_json) + assert message_output_debug_turn_event_turn_event_action_finished_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventActionFinished by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_action_finished_model_dict = MessageOutputDebugTurnEventTurnEventActionFinished.from_dict(message_output_debug_turn_event_turn_event_action_finished_model_json).__dict__ + message_output_debug_turn_event_turn_event_action_finished_model2 = MessageOutputDebugTurnEventTurnEventActionFinished(**message_output_debug_turn_event_turn_event_action_finished_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_action_finished_model == message_output_debug_turn_event_turn_event_action_finished_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_action_finished_model_json2 = message_output_debug_turn_event_turn_event_action_finished_model.to_dict() + assert message_output_debug_turn_event_turn_event_action_finished_model_json2 == message_output_debug_turn_event_turn_event_action_finished_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventActionRoutingDenied: + """ + Test Class for MessageOutputDebugTurnEventTurnEventActionRoutingDenied + """ + + def test_message_output_debug_turn_event_turn_event_action_routing_denied_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventActionRoutingDenied + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventActionRoutingDenied model + message_output_debug_turn_event_turn_event_action_routing_denied_model_json = {} + message_output_debug_turn_event_turn_event_action_routing_denied_model_json['event'] = 'action_routing_denied' + message_output_debug_turn_event_turn_event_action_routing_denied_model_json['source'] = turn_event_action_source_model + message_output_debug_turn_event_turn_event_action_routing_denied_model_json['condition_type'] = 'user_defined' + message_output_debug_turn_event_turn_event_action_routing_denied_model_json['reason'] = 'action_conditions_failed' + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventActionRoutingDenied by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_action_routing_denied_model = MessageOutputDebugTurnEventTurnEventActionRoutingDenied.from_dict(message_output_debug_turn_event_turn_event_action_routing_denied_model_json) + assert message_output_debug_turn_event_turn_event_action_routing_denied_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventActionRoutingDenied by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_action_routing_denied_model_dict = MessageOutputDebugTurnEventTurnEventActionRoutingDenied.from_dict(message_output_debug_turn_event_turn_event_action_routing_denied_model_json).__dict__ + message_output_debug_turn_event_turn_event_action_routing_denied_model2 = MessageOutputDebugTurnEventTurnEventActionRoutingDenied(**message_output_debug_turn_event_turn_event_action_routing_denied_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_action_routing_denied_model == message_output_debug_turn_event_turn_event_action_routing_denied_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_action_routing_denied_model_json2 = message_output_debug_turn_event_turn_event_action_routing_denied_model.to_dict() + assert message_output_debug_turn_event_turn_event_action_routing_denied_model_json2 == message_output_debug_turn_event_turn_event_action_routing_denied_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventActionVisited: + """ + Test Class for MessageOutputDebugTurnEventTurnEventActionVisited + """ + + def test_message_output_debug_turn_event_turn_event_action_visited_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventActionVisited + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventActionVisited model + message_output_debug_turn_event_turn_event_action_visited_model_json = {} + message_output_debug_turn_event_turn_event_action_visited_model_json['event'] = 'action_visited' + message_output_debug_turn_event_turn_event_action_visited_model_json['source'] = turn_event_action_source_model + message_output_debug_turn_event_turn_event_action_visited_model_json['action_start_time'] = 'testString' + message_output_debug_turn_event_turn_event_action_visited_model_json['condition_type'] = 'user_defined' + message_output_debug_turn_event_turn_event_action_visited_model_json['reason'] = 'intent' + message_output_debug_turn_event_turn_event_action_visited_model_json['result_variable'] = 'testString' + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventActionVisited by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_action_visited_model = MessageOutputDebugTurnEventTurnEventActionVisited.from_dict(message_output_debug_turn_event_turn_event_action_visited_model_json) + assert message_output_debug_turn_event_turn_event_action_visited_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventActionVisited by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_action_visited_model_dict = MessageOutputDebugTurnEventTurnEventActionVisited.from_dict(message_output_debug_turn_event_turn_event_action_visited_model_json).__dict__ + message_output_debug_turn_event_turn_event_action_visited_model2 = MessageOutputDebugTurnEventTurnEventActionVisited(**message_output_debug_turn_event_turn_event_action_visited_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_action_visited_model == message_output_debug_turn_event_turn_event_action_visited_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_action_visited_model_json2 = message_output_debug_turn_event_turn_event_action_visited_model.to_dict() + assert message_output_debug_turn_event_turn_event_action_visited_model_json2 == message_output_debug_turn_event_turn_event_action_visited_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventCallout: + """ + Test Class for MessageOutputDebugTurnEventTurnEventCallout + """ + + def test_message_output_debug_turn_event_turn_event_callout_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventCallout + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + turn_event_callout_callout_request_model = {} # TurnEventCalloutCalloutRequest + turn_event_callout_callout_request_model['method'] = 'get' + turn_event_callout_callout_request_model['url'] = 'testString' + turn_event_callout_callout_request_model['path'] = 'testString' + turn_event_callout_callout_request_model['query_parameters'] = 'testString' + turn_event_callout_callout_request_model['headers'] = {'anyKey': 'anyValue'} + turn_event_callout_callout_request_model['body'] = {'anyKey': 'anyValue'} + + turn_event_callout_callout_response_model = {} # TurnEventCalloutCalloutResponse + turn_event_callout_callout_response_model['body'] = 'testString' + turn_event_callout_callout_response_model['status_code'] = 38 + turn_event_callout_callout_response_model['last_event'] = {'anyKey': 'anyValue'} + + turn_event_callout_callout_model = {} # TurnEventCalloutCallout + turn_event_callout_callout_model['type'] = 'integration_interaction' + turn_event_callout_callout_model['internal'] = {'anyKey': 'anyValue'} + turn_event_callout_callout_model['result_variable'] = 'testString' + turn_event_callout_callout_model['request'] = turn_event_callout_callout_request_model + turn_event_callout_callout_model['response'] = turn_event_callout_callout_response_model + + turn_event_callout_error_model = {} # TurnEventCalloutError + turn_event_callout_error_model['message'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventCallout model + message_output_debug_turn_event_turn_event_callout_model_json = {} + message_output_debug_turn_event_turn_event_callout_model_json['event'] = 'callout' + message_output_debug_turn_event_turn_event_callout_model_json['source'] = turn_event_action_source_model + message_output_debug_turn_event_turn_event_callout_model_json['callout'] = turn_event_callout_callout_model + message_output_debug_turn_event_turn_event_callout_model_json['error'] = turn_event_callout_error_model + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventCallout by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_callout_model = MessageOutputDebugTurnEventTurnEventCallout.from_dict(message_output_debug_turn_event_turn_event_callout_model_json) + assert message_output_debug_turn_event_turn_event_callout_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventCallout by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_callout_model_dict = MessageOutputDebugTurnEventTurnEventCallout.from_dict(message_output_debug_turn_event_turn_event_callout_model_json).__dict__ + message_output_debug_turn_event_turn_event_callout_model2 = MessageOutputDebugTurnEventTurnEventCallout(**message_output_debug_turn_event_turn_event_callout_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_callout_model == message_output_debug_turn_event_turn_event_callout_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_callout_model_json2 = message_output_debug_turn_event_turn_event_callout_model.to_dict() + assert 
message_output_debug_turn_event_turn_event_callout_model_json2 == message_output_debug_turn_event_turn_event_callout_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventClientActions: + """ + Test Class for MessageOutputDebugTurnEventTurnEventClientActions + """ + + def test_message_output_debug_turn_event_turn_event_client_actions_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventClientActions + """ + + # Construct dict forms of any model objects needed in order to build this model. + + turn_event_step_source_model = {} # TurnEventStepSource + turn_event_step_source_model['type'] = 'step' + turn_event_step_source_model['action'] = 'testString' + turn_event_step_source_model['action_title'] = 'testString' + turn_event_step_source_model['step'] = 'testString' + turn_event_step_source_model['is_ai_guided'] = True + turn_event_step_source_model['is_skill_based'] = True + + client_action_model = {} # ClientAction + client_action_model['name'] = 'testString' + client_action_model['result_variable'] = 'testString' + client_action_model['type'] = 'testString' + client_action_model['skill'] = 'main skill' + client_action_model['parameters'] = {'anyKey': 'anyValue'} + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventClientActions model + message_output_debug_turn_event_turn_event_client_actions_model_json = {} + message_output_debug_turn_event_turn_event_client_actions_model_json['event'] = 'client_actions' + message_output_debug_turn_event_turn_event_client_actions_model_json['source'] = turn_event_step_source_model + message_output_debug_turn_event_turn_event_client_actions_model_json['client_actions'] = [client_action_model] + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventClientActions by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_client_actions_model = MessageOutputDebugTurnEventTurnEventClientActions.from_dict(message_output_debug_turn_event_turn_event_client_actions_model_json) + assert message_output_debug_turn_event_turn_event_client_actions_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventClientActions by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_client_actions_model_dict = MessageOutputDebugTurnEventTurnEventClientActions.from_dict(message_output_debug_turn_event_turn_event_client_actions_model_json).__dict__ + message_output_debug_turn_event_turn_event_client_actions_model2 = MessageOutputDebugTurnEventTurnEventClientActions(**message_output_debug_turn_event_turn_event_client_actions_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_client_actions_model == message_output_debug_turn_event_turn_event_client_actions_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_client_actions_model_json2 = message_output_debug_turn_event_turn_event_client_actions_model.to_dict() + assert message_output_debug_turn_event_turn_event_client_actions_model_json2 == message_output_debug_turn_event_turn_event_client_actions_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventConversationalSearchEnd: + """ + Test Class for MessageOutputDebugTurnEventTurnEventConversationalSearchEnd + """ + + def test_message_output_debug_turn_event_turn_event_conversational_search_end_serialization(self): + """ + Test 
serialization/deserialization for MessageOutputDebugTurnEventTurnEventConversationalSearchEnd + """ + + # Construct dict forms of any model objects needed in order to build this model. + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventConversationalSearchEnd model + message_output_debug_turn_event_turn_event_conversational_search_end_model_json = {} + message_output_debug_turn_event_turn_event_conversational_search_end_model_json['event'] = 'conversational_search_end' + message_output_debug_turn_event_turn_event_conversational_search_end_model_json['source'] = turn_event_action_source_model + message_output_debug_turn_event_turn_event_conversational_search_end_model_json['condition_type'] = 'user_defined' + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventConversationalSearchEnd by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_conversational_search_end_model = MessageOutputDebugTurnEventTurnEventConversationalSearchEnd.from_dict(message_output_debug_turn_event_turn_event_conversational_search_end_model_json) + assert message_output_debug_turn_event_turn_event_conversational_search_end_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventConversationalSearchEnd by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_conversational_search_end_model_dict = MessageOutputDebugTurnEventTurnEventConversationalSearchEnd.from_dict(message_output_debug_turn_event_turn_event_conversational_search_end_model_json).__dict__ + message_output_debug_turn_event_turn_event_conversational_search_end_model2 = MessageOutputDebugTurnEventTurnEventConversationalSearchEnd(**message_output_debug_turn_event_turn_event_conversational_search_end_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_conversational_search_end_model == message_output_debug_turn_event_turn_event_conversational_search_end_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_conversational_search_end_model_json2 = message_output_debug_turn_event_turn_event_conversational_search_end_model.to_dict() + assert message_output_debug_turn_event_turn_event_conversational_search_end_model_json2 == message_output_debug_turn_event_turn_event_conversational_search_end_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventGenerativeAICalled: + """ + Test Class for MessageOutputDebugTurnEventTurnEventGenerativeAICalled + """ + + def test_message_output_debug_turn_event_turn_event_generative_ai_called_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventGenerativeAICalled + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + generative_ai_task_confidence_scores_model = {} # GenerativeAITaskConfidenceScores + generative_ai_task_confidence_scores_model['pre_gen'] = 72.5 + generative_ai_task_confidence_scores_model['pre_gen_threshold'] = 72.5 + generative_ai_task_confidence_scores_model['post_gen'] = 72.5 + generative_ai_task_confidence_scores_model['post_gen_threshold'] = 72.5 + + generative_ai_task_model = {} # GenerativeAITaskContentGroundedAnswering + generative_ai_task_model['task'] = 'content_grounded_answering' + generative_ai_task_model['is_idk_response'] = True + generative_ai_task_model['is_hap_detected'] = True + generative_ai_task_model['confidence_scores'] = generative_ai_task_confidence_scores_model + generative_ai_task_model['original_response'] = 'testString' + generative_ai_task_model['inferred_query'] = 'testString' + + turn_event_generative_ai_called_callout_request_model = {} # TurnEventGenerativeAICalledCalloutRequest + turn_event_generative_ai_called_callout_request_model['method'] = 'GET' + turn_event_generative_ai_called_callout_request_model['url'] = 'testString' + turn_event_generative_ai_called_callout_request_model['port'] = 'testString' + turn_event_generative_ai_called_callout_request_model['path'] = 'testString' + turn_event_generative_ai_called_callout_request_model['query_parameters'] = 'testString' + turn_event_generative_ai_called_callout_request_model['headers'] = {'anyKey': 'anyValue'} + turn_event_generative_ai_called_callout_request_model['body'] = {'anyKey': 'anyValue'} + + turn_event_generative_ai_called_callout_response_model = {} # TurnEventGenerativeAICalledCalloutResponse + turn_event_generative_ai_called_callout_response_model['body'] = 'testString' + turn_event_generative_ai_called_callout_response_model['status_code'] = 38 + + turn_event_generative_ai_called_callout_search_model = {} # TurnEventGenerativeAICalledCalloutSearch + turn_event_generative_ai_called_callout_search_model['engine'] = 'testString' + turn_event_generative_ai_called_callout_search_model['index'] = 'testString' + turn_event_generative_ai_called_callout_search_model['query'] = 'testString' + turn_event_generative_ai_called_callout_search_model['request'] = turn_event_generative_ai_called_callout_request_model + turn_event_generative_ai_called_callout_search_model['response'] = turn_event_generative_ai_called_callout_response_model + + turn_event_generative_ai_called_callout_llm_response_model = {} # TurnEventGenerativeAICalledCalloutLlmResponse + turn_event_generative_ai_called_callout_llm_response_model['text'] = 'testString' + turn_event_generative_ai_called_callout_llm_response_model['response_type'] = 'testString' + turn_event_generative_ai_called_callout_llm_response_model['is_idk_response'] = True + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + turn_event_generative_ai_called_callout_llm_model = {} # TurnEventGenerativeAICalledCalloutLlm + turn_event_generative_ai_called_callout_llm_model['type'] = 'testString' + turn_event_generative_ai_called_callout_llm_model['model_id'] = 'testString' + turn_event_generative_ai_called_callout_llm_model['model_class_id'] = 
'testString' + turn_event_generative_ai_called_callout_llm_model['generated_token_count'] = 38 + turn_event_generative_ai_called_callout_llm_model['input_token_count'] = 38 + turn_event_generative_ai_called_callout_llm_model['success'] = True + turn_event_generative_ai_called_callout_llm_model['response'] = turn_event_generative_ai_called_callout_llm_response_model + turn_event_generative_ai_called_callout_llm_model['request'] = [search_results_model] + + turn_event_generative_ai_called_callout_model = {} # TurnEventGenerativeAICalledCallout + turn_event_generative_ai_called_callout_model['search_called'] = True + turn_event_generative_ai_called_callout_model['llm_called'] = True + turn_event_generative_ai_called_callout_model['search'] = turn_event_generative_ai_called_callout_search_model + turn_event_generative_ai_called_callout_model['llm'] = turn_event_generative_ai_called_callout_llm_model + turn_event_generative_ai_called_callout_model['idk_reason_code'] = 'testString' + + turn_event_generative_ai_called_metrics_model = {} # TurnEventGenerativeAICalledMetrics + turn_event_generative_ai_called_metrics_model['search_time_ms'] = 72.5 + turn_event_generative_ai_called_metrics_model['answer_generation_time_ms'] = 72.5 + turn_event_generative_ai_called_metrics_model['total_time_ms'] = 72.5 + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventGenerativeAICalled model + message_output_debug_turn_event_turn_event_generative_ai_called_model_json = {} + message_output_debug_turn_event_turn_event_generative_ai_called_model_json['event'] = 'generative_ai_called' + message_output_debug_turn_event_turn_event_generative_ai_called_model_json['source'] = {'anyKey': 'anyValue'} + message_output_debug_turn_event_turn_event_generative_ai_called_model_json['generative_ai_start_time'] = 'testString' + message_output_debug_turn_event_turn_event_generative_ai_called_model_json['generative_ai'] = generative_ai_task_model + message_output_debug_turn_event_turn_event_generative_ai_called_model_json['callout'] = turn_event_generative_ai_called_callout_model + message_output_debug_turn_event_turn_event_generative_ai_called_model_json['metrics'] = turn_event_generative_ai_called_metrics_model + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventGenerativeAICalled by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_generative_ai_called_model = MessageOutputDebugTurnEventTurnEventGenerativeAICalled.from_dict(message_output_debug_turn_event_turn_event_generative_ai_called_model_json) + assert message_output_debug_turn_event_turn_event_generative_ai_called_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventGenerativeAICalled by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_generative_ai_called_model_dict = MessageOutputDebugTurnEventTurnEventGenerativeAICalled.from_dict(message_output_debug_turn_event_turn_event_generative_ai_called_model_json).__dict__ + message_output_debug_turn_event_turn_event_generative_ai_called_model2 = MessageOutputDebugTurnEventTurnEventGenerativeAICalled(**message_output_debug_turn_event_turn_event_generative_ai_called_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_generative_ai_called_model == message_output_debug_turn_event_turn_event_generative_ai_called_model2 + + # Convert model instance back to dict and verify no loss of data + 
message_output_debug_turn_event_turn_event_generative_ai_called_model_json2 = message_output_debug_turn_event_turn_event_generative_ai_called_model.to_dict() + assert message_output_debug_turn_event_turn_event_generative_ai_called_model_json2 == message_output_debug_turn_event_turn_event_generative_ai_called_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventHandlerVisited: + """ + Test Class for MessageOutputDebugTurnEventTurnEventHandlerVisited + """ + + def test_message_output_debug_turn_event_turn_event_handler_visited_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventHandlerVisited + """ + + # Construct dict forms of any model objects needed in order to build this model. + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventHandlerVisited model + message_output_debug_turn_event_turn_event_handler_visited_model_json = {} + message_output_debug_turn_event_turn_event_handler_visited_model_json['event'] = 'handler_visited' + message_output_debug_turn_event_turn_event_handler_visited_model_json['source'] = turn_event_action_source_model + message_output_debug_turn_event_turn_event_handler_visited_model_json['action_start_time'] = 'testString' + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventHandlerVisited by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_handler_visited_model = MessageOutputDebugTurnEventTurnEventHandlerVisited.from_dict(message_output_debug_turn_event_turn_event_handler_visited_model_json) + assert message_output_debug_turn_event_turn_event_handler_visited_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventHandlerVisited by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_handler_visited_model_dict = MessageOutputDebugTurnEventTurnEventHandlerVisited.from_dict(message_output_debug_turn_event_turn_event_handler_visited_model_json).__dict__ + message_output_debug_turn_event_turn_event_handler_visited_model2 = MessageOutputDebugTurnEventTurnEventHandlerVisited(**message_output_debug_turn_event_turn_event_handler_visited_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_handler_visited_model == message_output_debug_turn_event_turn_event_handler_visited_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_handler_visited_model_json2 = message_output_debug_turn_event_turn_event_handler_visited_model.to_dict() + assert message_output_debug_turn_event_turn_event_handler_visited_model_json2 == message_output_debug_turn_event_turn_event_handler_visited_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventManualRoute: + """ + Test Class for MessageOutputDebugTurnEventTurnEventManualRoute + """ + + def test_message_output_debug_turn_event_turn_event_manual_route_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventManualRoute + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_step_source_model = {} # TurnEventStepSource + turn_event_step_source_model['type'] = 'step' + turn_event_step_source_model['action'] = 'testString' + turn_event_step_source_model['action_title'] = 'testString' + turn_event_step_source_model['step'] = 'testString' + turn_event_step_source_model['is_ai_guided'] = True + turn_event_step_source_model['is_skill_based'] = True + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventManualRoute model + message_output_debug_turn_event_turn_event_manual_route_model_json = {} + message_output_debug_turn_event_turn_event_manual_route_model_json['event'] = 'manual_route' + message_output_debug_turn_event_turn_event_manual_route_model_json['source'] = turn_event_step_source_model + message_output_debug_turn_event_turn_event_manual_route_model_json['condition_type'] = 'user_defined' + message_output_debug_turn_event_turn_event_manual_route_model_json['action_start_time'] = 'testString' + message_output_debug_turn_event_turn_event_manual_route_model_json['route_name'] = 'testString' + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventManualRoute by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_manual_route_model = MessageOutputDebugTurnEventTurnEventManualRoute.from_dict(message_output_debug_turn_event_turn_event_manual_route_model_json) + assert message_output_debug_turn_event_turn_event_manual_route_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventManualRoute by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_manual_route_model_dict = MessageOutputDebugTurnEventTurnEventManualRoute.from_dict(message_output_debug_turn_event_turn_event_manual_route_model_json).__dict__ + message_output_debug_turn_event_turn_event_manual_route_model2 = MessageOutputDebugTurnEventTurnEventManualRoute(**message_output_debug_turn_event_turn_event_manual_route_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_manual_route_model == message_output_debug_turn_event_turn_event_manual_route_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_manual_route_model_json2 = message_output_debug_turn_event_turn_event_manual_route_model.to_dict() + assert message_output_debug_turn_event_turn_event_manual_route_model_json2 == message_output_debug_turn_event_turn_event_manual_route_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventNodeVisited: + """ + Test Class for MessageOutputDebugTurnEventTurnEventNodeVisited + """ + + def test_message_output_debug_turn_event_turn_event_node_visited_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventNodeVisited + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_node_source_model = {} # TurnEventNodeSource + turn_event_node_source_model['type'] = 'dialog_node' + turn_event_node_source_model['dialog_node'] = 'testString' + turn_event_node_source_model['title'] = 'testString' + turn_event_node_source_model['condition'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventNodeVisited model + message_output_debug_turn_event_turn_event_node_visited_model_json = {} + message_output_debug_turn_event_turn_event_node_visited_model_json['event'] = 'node_visited' + message_output_debug_turn_event_turn_event_node_visited_model_json['source'] = turn_event_node_source_model + message_output_debug_turn_event_turn_event_node_visited_model_json['reason'] = 'welcome' + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventNodeVisited by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_node_visited_model = MessageOutputDebugTurnEventTurnEventNodeVisited.from_dict(message_output_debug_turn_event_turn_event_node_visited_model_json) + assert message_output_debug_turn_event_turn_event_node_visited_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventNodeVisited by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_node_visited_model_dict = MessageOutputDebugTurnEventTurnEventNodeVisited.from_dict(message_output_debug_turn_event_turn_event_node_visited_model_json).__dict__ + message_output_debug_turn_event_turn_event_node_visited_model2 = MessageOutputDebugTurnEventTurnEventNodeVisited(**message_output_debug_turn_event_turn_event_node_visited_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_node_visited_model == message_output_debug_turn_event_turn_event_node_visited_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_node_visited_model_json2 = message_output_debug_turn_event_turn_event_node_visited_model.to_dict() + assert message_output_debug_turn_event_turn_event_node_visited_model_json2 == message_output_debug_turn_event_turn_event_node_visited_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventSearch: + """ + Test Class for MessageOutputDebugTurnEventTurnEventSearch + """ + + def test_message_output_debug_turn_event_turn_event_search_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventSearch + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + turn_event_search_error_model = {} # TurnEventSearchError + turn_event_search_error_model['message'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventSearch model + message_output_debug_turn_event_turn_event_search_model_json = {} + message_output_debug_turn_event_turn_event_search_model_json['event'] = 'search' + message_output_debug_turn_event_turn_event_search_model_json['source'] = turn_event_action_source_model + message_output_debug_turn_event_turn_event_search_model_json['error'] = turn_event_search_error_model + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventSearch by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_search_model = MessageOutputDebugTurnEventTurnEventSearch.from_dict(message_output_debug_turn_event_turn_event_search_model_json) + assert message_output_debug_turn_event_turn_event_search_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventSearch by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_search_model_dict = MessageOutputDebugTurnEventTurnEventSearch.from_dict(message_output_debug_turn_event_turn_event_search_model_json).__dict__ + message_output_debug_turn_event_turn_event_search_model2 = MessageOutputDebugTurnEventTurnEventSearch(**message_output_debug_turn_event_turn_event_search_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_search_model == message_output_debug_turn_event_turn_event_search_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_search_model_json2 = message_output_debug_turn_event_turn_event_search_model.to_dict() + assert message_output_debug_turn_event_turn_event_search_model_json2 == message_output_debug_turn_event_turn_event_search_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventStepAnswered: + """ + Test Class for MessageOutputDebugTurnEventTurnEventStepAnswered + """ + + def test_message_output_debug_turn_event_turn_event_step_answered_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventStepAnswered + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventStepAnswered model + message_output_debug_turn_event_turn_event_step_answered_model_json = {} + message_output_debug_turn_event_turn_event_step_answered_model_json['event'] = 'step_answered' + message_output_debug_turn_event_turn_event_step_answered_model_json['source'] = turn_event_action_source_model + message_output_debug_turn_event_turn_event_step_answered_model_json['condition_type'] = 'user_defined' + message_output_debug_turn_event_turn_event_step_answered_model_json['action_start_time'] = 'testString' + message_output_debug_turn_event_turn_event_step_answered_model_json['prompted'] = True + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventStepAnswered by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_step_answered_model = MessageOutputDebugTurnEventTurnEventStepAnswered.from_dict(message_output_debug_turn_event_turn_event_step_answered_model_json) + assert message_output_debug_turn_event_turn_event_step_answered_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventStepAnswered by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_step_answered_model_dict = MessageOutputDebugTurnEventTurnEventStepAnswered.from_dict(message_output_debug_turn_event_turn_event_step_answered_model_json).__dict__ + message_output_debug_turn_event_turn_event_step_answered_model2 = MessageOutputDebugTurnEventTurnEventStepAnswered(**message_output_debug_turn_event_turn_event_step_answered_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_step_answered_model == message_output_debug_turn_event_turn_event_step_answered_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_step_answered_model_json2 = message_output_debug_turn_event_turn_event_step_answered_model.to_dict() + assert message_output_debug_turn_event_turn_event_step_answered_model_json2 == message_output_debug_turn_event_turn_event_step_answered_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventStepVisited: + """ + Test Class for MessageOutputDebugTurnEventTurnEventStepVisited + """ + + def test_message_output_debug_turn_event_turn_event_step_visited_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventStepVisited + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventStepVisited model + message_output_debug_turn_event_turn_event_step_visited_model_json = {} + message_output_debug_turn_event_turn_event_step_visited_model_json['event'] = 'step_visited' + message_output_debug_turn_event_turn_event_step_visited_model_json['source'] = turn_event_action_source_model + message_output_debug_turn_event_turn_event_step_visited_model_json['condition_type'] = 'user_defined' + message_output_debug_turn_event_turn_event_step_visited_model_json['action_start_time'] = 'testString' + message_output_debug_turn_event_turn_event_step_visited_model_json['has_question'] = True + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventStepVisited by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_step_visited_model = MessageOutputDebugTurnEventTurnEventStepVisited.from_dict(message_output_debug_turn_event_turn_event_step_visited_model_json) + assert message_output_debug_turn_event_turn_event_step_visited_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventStepVisited by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_step_visited_model_dict = MessageOutputDebugTurnEventTurnEventStepVisited.from_dict(message_output_debug_turn_event_turn_event_step_visited_model_json).__dict__ + message_output_debug_turn_event_turn_event_step_visited_model2 = MessageOutputDebugTurnEventTurnEventStepVisited(**message_output_debug_turn_event_turn_event_step_visited_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_step_visited_model == message_output_debug_turn_event_turn_event_step_visited_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_step_visited_model_json2 = message_output_debug_turn_event_turn_event_step_visited_model.to_dict() + assert message_output_debug_turn_event_turn_event_step_visited_model_json2 == message_output_debug_turn_event_turn_event_step_visited_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied: + """ + Test Class for MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied + """ + + def test_message_output_debug_turn_event_turn_event_suggestion_intents_denied_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied model + message_output_debug_turn_event_turn_event_suggestion_intents_denied_model_json = {} + message_output_debug_turn_event_turn_event_suggestion_intents_denied_model_json['event'] = 'suggestion_intents_denied' + message_output_debug_turn_event_turn_event_suggestion_intents_denied_model_json['intents_denied'] = [runtime_intent_model] + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_suggestion_intents_denied_model = MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied.from_dict(message_output_debug_turn_event_turn_event_suggestion_intents_denied_model_json) + assert message_output_debug_turn_event_turn_event_suggestion_intents_denied_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_suggestion_intents_denied_model_dict = MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied.from_dict(message_output_debug_turn_event_turn_event_suggestion_intents_denied_model_json).__dict__ + message_output_debug_turn_event_turn_event_suggestion_intents_denied_model2 = MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied(**message_output_debug_turn_event_turn_event_suggestion_intents_denied_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_suggestion_intents_denied_model == message_output_debug_turn_event_turn_event_suggestion_intents_denied_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_suggestion_intents_denied_model_json2 = message_output_debug_turn_event_turn_event_suggestion_intents_denied_model.to_dict() + assert message_output_debug_turn_event_turn_event_suggestion_intents_denied_model_json2 == message_output_debug_turn_event_turn_event_suggestion_intents_denied_model_json + + +class TestModel_MessageOutputDebugTurnEventTurnEventTopicSwitchDenied: + """ + Test Class for MessageOutputDebugTurnEventTurnEventTopicSwitchDenied + """ + + def test_message_output_debug_turn_event_turn_event_topic_switch_denied_serialization(self): + """ + Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventTopicSwitchDenied + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + # Construct a json representation of a MessageOutputDebugTurnEventTurnEventTopicSwitchDenied model + message_output_debug_turn_event_turn_event_topic_switch_denied_model_json = {} + message_output_debug_turn_event_turn_event_topic_switch_denied_model_json['event'] = 'topic_switch_denied' + message_output_debug_turn_event_turn_event_topic_switch_denied_model_json['source'] = turn_event_action_source_model + message_output_debug_turn_event_turn_event_topic_switch_denied_model_json['condition_type'] = 'user_defined' + message_output_debug_turn_event_turn_event_topic_switch_denied_model_json['reason'] = 'action_conditions_failed' + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventTopicSwitchDenied by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_topic_switch_denied_model = MessageOutputDebugTurnEventTurnEventTopicSwitchDenied.from_dict(message_output_debug_turn_event_turn_event_topic_switch_denied_model_json) + assert message_output_debug_turn_event_turn_event_topic_switch_denied_model != False + + # Construct a model instance of MessageOutputDebugTurnEventTurnEventTopicSwitchDenied by calling from_dict on the json representation + message_output_debug_turn_event_turn_event_topic_switch_denied_model_dict = MessageOutputDebugTurnEventTurnEventTopicSwitchDenied.from_dict(message_output_debug_turn_event_turn_event_topic_switch_denied_model_json).__dict__ + message_output_debug_turn_event_turn_event_topic_switch_denied_model2 = MessageOutputDebugTurnEventTurnEventTopicSwitchDenied(**message_output_debug_turn_event_turn_event_topic_switch_denied_model_dict) + + # Verify the model instances are equivalent + assert message_output_debug_turn_event_turn_event_topic_switch_denied_model == message_output_debug_turn_event_turn_event_topic_switch_denied_model2 + + # Convert model instance back to dict and verify no loss of data + message_output_debug_turn_event_turn_event_topic_switch_denied_model_json2 = message_output_debug_turn_event_turn_event_topic_switch_denied_model.to_dict() + assert message_output_debug_turn_event_turn_event_topic_switch_denied_model_json2 == message_output_debug_turn_event_turn_event_topic_switch_denied_model_json + + +class TestModel_MessageStreamResponseMessageStreamCompleteItem: + """ + Test Class for MessageStreamResponseMessageStreamCompleteItem + """ + + def test_message_stream_response_message_stream_complete_item_serialization(self): + """ + Test serialization/deserialization for MessageStreamResponseMessageStreamCompleteItem + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + metadata_model = {} # Metadata + metadata_model['id'] = 38 + + complete_item_model = {} # CompleteItem + complete_item_model['streaming_metadata'] = metadata_model + + # Construct a json representation of a MessageStreamResponseMessageStreamCompleteItem model + message_stream_response_message_stream_complete_item_model_json = {} + message_stream_response_message_stream_complete_item_model_json['complete_item'] = complete_item_model + + # Construct a model instance of MessageStreamResponseMessageStreamCompleteItem by calling from_dict on the json representation + message_stream_response_message_stream_complete_item_model = MessageStreamResponseMessageStreamCompleteItem.from_dict(message_stream_response_message_stream_complete_item_model_json) + assert message_stream_response_message_stream_complete_item_model != False + + # Construct a model instance of MessageStreamResponseMessageStreamCompleteItem by calling from_dict on the json representation + message_stream_response_message_stream_complete_item_model_dict = MessageStreamResponseMessageStreamCompleteItem.from_dict(message_stream_response_message_stream_complete_item_model_json).__dict__ + message_stream_response_message_stream_complete_item_model2 = MessageStreamResponseMessageStreamCompleteItem(**message_stream_response_message_stream_complete_item_model_dict) + + # Verify the model instances are equivalent + assert message_stream_response_message_stream_complete_item_model == message_stream_response_message_stream_complete_item_model2 + + # Convert model instance back to dict and verify no loss of data + message_stream_response_message_stream_complete_item_model_json2 = message_stream_response_message_stream_complete_item_model.to_dict() + assert message_stream_response_message_stream_complete_item_model_json2 == message_stream_response_message_stream_complete_item_model_json + + +class TestModel_MessageStreamResponseMessageStreamPartialItem: + """ + Test Class for MessageStreamResponseMessageStreamPartialItem + """ + + def test_message_stream_response_message_stream_partial_item_serialization(self): + """ + Test serialization/deserialization for MessageStreamResponseMessageStreamPartialItem + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + metadata_model = {} # Metadata + metadata_model['id'] = 38 + + partial_item_model = {} # PartialItem + partial_item_model['response_type'] = 'testString' + partial_item_model['text'] = 'testString' + partial_item_model['streaming_metadata'] = metadata_model + + # Construct a json representation of a MessageStreamResponseMessageStreamPartialItem model + message_stream_response_message_stream_partial_item_model_json = {} + message_stream_response_message_stream_partial_item_model_json['partial_item'] = partial_item_model + + # Construct a model instance of MessageStreamResponseMessageStreamPartialItem by calling from_dict on the json representation + message_stream_response_message_stream_partial_item_model = MessageStreamResponseMessageStreamPartialItem.from_dict(message_stream_response_message_stream_partial_item_model_json) + assert message_stream_response_message_stream_partial_item_model != False + + # Construct a model instance of MessageStreamResponseMessageStreamPartialItem by calling from_dict on the json representation + message_stream_response_message_stream_partial_item_model_dict = MessageStreamResponseMessageStreamPartialItem.from_dict(message_stream_response_message_stream_partial_item_model_json).__dict__ + message_stream_response_message_stream_partial_item_model2 = MessageStreamResponseMessageStreamPartialItem(**message_stream_response_message_stream_partial_item_model_dict) + + # Verify the model instances are equivalent + assert message_stream_response_message_stream_partial_item_model == message_stream_response_message_stream_partial_item_model2 + + # Convert model instance back to dict and verify no loss of data + message_stream_response_message_stream_partial_item_model_json2 = message_stream_response_message_stream_partial_item_model.to_dict() + assert message_stream_response_message_stream_partial_item_model_json2 == message_stream_response_message_stream_partial_item_model_json + + +class TestModel_MessageStreamResponseStatefulMessageStreamFinalResponse: + """ + Test Class for MessageStreamResponseStatefulMessageStreamFinalResponse + """ + + def test_message_stream_response_stateful_message_stream_final_response_serialization(self): + """ + Test serialization/deserialization for MessageStreamResponseStatefulMessageStreamFinalResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + 
runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = 
True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + metadata_model = {} # Metadata + metadata_model['id'] = 38 + + message_stream_metadata_model = {} # MessageStreamMetadata + message_stream_metadata_model['streaming_metadata'] = metadata_model + + final_response_output_model = {} # FinalResponseOutput + final_response_output_model['generic'] = [runtime_response_generic_model] + final_response_output_model['intents'] = [runtime_intent_model] + final_response_output_model['entities'] = [runtime_entity_model] + final_response_output_model['actions'] = [dialog_node_action_model] + final_response_output_model['debug'] = message_output_debug_model + final_response_output_model['user_defined'] = {'anyKey': 'anyValue'} + final_response_output_model['spelling'] = message_output_spelling_model + final_response_output_model['llm_metadata'] = [message_output_llm_metadata_model] + final_response_output_model['streaming_metadata'] = message_stream_metadata_model + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + message_context_global_model = {} # MessageContextGlobal + message_context_global_model['system'] = message_context_global_system_model + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + message_context_action_skill_model = {} # MessageContextActionSkill + message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['system'] = message_context_skill_system_model + message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'} + message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + + message_context_skills_model = {} # MessageContextSkills + message_context_skills_model['main skill'] = message_context_dialog_skill_model + message_context_skills_model['actions skill'] = message_context_action_skill_model + + message_context_model = {} # MessageContext + message_context_model['global'] = message_context_global_model + message_context_model['skills'] = message_context_skills_model + message_context_model['integrations'] = {'anyKey': 'anyValue'} + + 
message_output_model = {} # MessageOutput + message_output_model['generic'] = [runtime_response_generic_model] + message_output_model['intents'] = [runtime_intent_model] + message_output_model['entities'] = [runtime_entity_model] + message_output_model['actions'] = [dialog_node_action_model] + message_output_model['debug'] = message_output_debug_model + message_output_model['user_defined'] = {'anyKey': 'anyValue'} + message_output_model['spelling'] = message_output_spelling_model + message_output_model['llm_metadata'] = [message_output_llm_metadata_model] + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + message_input_model = {} # MessageInput + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + final_response_model = {} # FinalResponse + final_response_model['output'] = final_response_output_model + final_response_model['context'] = message_context_model + final_response_model['user_id'] = 'testString' + final_response_model['masked_output'] = message_output_model + final_response_model['masked_input'] = message_input_model + + # Construct a json representation of a MessageStreamResponseStatefulMessageStreamFinalResponse model + message_stream_response_stateful_message_stream_final_response_model_json = {} + message_stream_response_stateful_message_stream_final_response_model_json['final_response'] = final_response_model + + # Construct a model instance of MessageStreamResponseStatefulMessageStreamFinalResponse by calling from_dict on the json representation + message_stream_response_stateful_message_stream_final_response_model = MessageStreamResponseStatefulMessageStreamFinalResponse.from_dict(message_stream_response_stateful_message_stream_final_response_model_json) + assert message_stream_response_stateful_message_stream_final_response_model != False + + # Construct a model instance of MessageStreamResponseStatefulMessageStreamFinalResponse by calling from_dict on the json representation + message_stream_response_stateful_message_stream_final_response_model_dict = 
MessageStreamResponseStatefulMessageStreamFinalResponse.from_dict(message_stream_response_stateful_message_stream_final_response_model_json).__dict__ + message_stream_response_stateful_message_stream_final_response_model2 = MessageStreamResponseStatefulMessageStreamFinalResponse(**message_stream_response_stateful_message_stream_final_response_model_dict) + + # Verify the model instances are equivalent + assert message_stream_response_stateful_message_stream_final_response_model == message_stream_response_stateful_message_stream_final_response_model2 + + # Convert model instance back to dict and verify no loss of data + message_stream_response_stateful_message_stream_final_response_model_json2 = message_stream_response_stateful_message_stream_final_response_model.to_dict() + assert message_stream_response_stateful_message_stream_final_response_model_json2 == message_stream_response_stateful_message_stream_final_response_model_json + + +class TestModel_ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode: + """ + Test Class for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode + """ + + def test_provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_serialization(self): + """ + Test serialization/deserialization for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode + """ + + # Construct a json representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode model + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json = {} + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json['token_url'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json['content_type'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json['authorization_url'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json['redirect_uri'] = 'testString' + + # Construct a model instance of ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode by calling from_dict on the json representation + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode.from_dict(provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json) + assert provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model != False + + # Construct a model instance of ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode by calling from_dict on the json representation + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_dict = 
ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode.from_dict(provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json).__dict__ + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model2 = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode(**provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_dict) + + # Verify the model instances are equivalent + assert provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model == provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model2 + + # Convert model instance back to dict and verify no loss of data + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json2 = provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model.to_dict() + assert provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json2 == provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_model_json + + +class TestModel_ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials: + """ + Test Class for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials + """ + + def test_provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_serialization(self): + """ + Test serialization/deserialization for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials + """ + + # Construct a json representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials model + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_json = {} + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_json['token_url'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_json['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_json['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_json['content_type'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_json['header_prefix'] = 'testString' + + # Construct a model instance of ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials by calling from_dict on the json representation + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials.from_dict(provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_json) + assert provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model != False + + # Construct a model instance of ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials by calling from_dict on the json representation + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_dict = 
ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials.from_dict(provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_json).__dict__ + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model2 = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials(**provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_dict) + + # Verify the model instances are equivalent + assert provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model == provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model2 + + # Convert model instance back to dict and verify no loss of data + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_json2 = provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model.to_dict() + assert provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_json2 == provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_model_json + + +class TestModel_ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password: + """ + Test Class for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password + """ + + def test_provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_serialization(self): + """ + Test serialization/deserialization for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername + provider_authentication_o_auth2_password_username_model['type'] = 'value' + provider_authentication_o_auth2_password_username_model['value'] = 'testString' + + # Construct a json representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password model + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json = {} + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json['token_url'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json['refresh_url'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json['client_auth_type'] = 'Body' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json['content_type'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json['header_prefix'] = 'testString' + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json['username'] = provider_authentication_o_auth2_password_username_model + + # Construct a model instance of ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password by calling from_dict on the json representation + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password.from_dict(provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json) + assert provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model != False + + # Construct a model instance of 
ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password by calling from_dict on the json representation + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_dict = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password.from_dict(provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json).__dict__ + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model2 = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password(**provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_dict) + + # Verify the model instances are equivalent + assert provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model == provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model2 + + # Convert model instance back to dict and verify no loss of data + provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json2 = provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model.to_dict() + assert provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json2 == provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_model_json + + +class TestModel_ProviderPrivateAuthenticationBasicFlow: + """ + Test Class for ProviderPrivateAuthenticationBasicFlow + """ + + def test_provider_private_authentication_basic_flow_serialization(self): + """ + Test serialization/deserialization for ProviderPrivateAuthenticationBasicFlow + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + # Construct a json representation of a ProviderPrivateAuthenticationBasicFlow model + provider_private_authentication_basic_flow_model_json = {} + provider_private_authentication_basic_flow_model_json['password'] = provider_authentication_type_and_value_model + + # Construct a model instance of ProviderPrivateAuthenticationBasicFlow by calling from_dict on the json representation + provider_private_authentication_basic_flow_model = ProviderPrivateAuthenticationBasicFlow.from_dict(provider_private_authentication_basic_flow_model_json) + assert provider_private_authentication_basic_flow_model != False + + # Construct a model instance of ProviderPrivateAuthenticationBasicFlow by calling from_dict on the json representation + provider_private_authentication_basic_flow_model_dict = ProviderPrivateAuthenticationBasicFlow.from_dict(provider_private_authentication_basic_flow_model_json).__dict__ + provider_private_authentication_basic_flow_model2 = ProviderPrivateAuthenticationBasicFlow(**provider_private_authentication_basic_flow_model_dict) + + # Verify the model instances are equivalent + assert provider_private_authentication_basic_flow_model == provider_private_authentication_basic_flow_model2 + + # Convert model instance back to dict and verify no loss of data + provider_private_authentication_basic_flow_model_json2 = provider_private_authentication_basic_flow_model.to_dict() + assert provider_private_authentication_basic_flow_model_json2 == provider_private_authentication_basic_flow_model_json + + +class TestModel_ProviderPrivateAuthenticationBearerFlow: + """ + Test Class for 
ProviderPrivateAuthenticationBearerFlow + """ + + def test_provider_private_authentication_bearer_flow_serialization(self): + """ + Test serialization/deserialization for ProviderPrivateAuthenticationBearerFlow + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue + provider_authentication_type_and_value_model['type'] = 'value' + provider_authentication_type_and_value_model['value'] = 'testString' + + # Construct a json representation of a ProviderPrivateAuthenticationBearerFlow model + provider_private_authentication_bearer_flow_model_json = {} + provider_private_authentication_bearer_flow_model_json['token'] = provider_authentication_type_and_value_model + + # Construct a model instance of ProviderPrivateAuthenticationBearerFlow by calling from_dict on the json representation + provider_private_authentication_bearer_flow_model = ProviderPrivateAuthenticationBearerFlow.from_dict(provider_private_authentication_bearer_flow_model_json) + assert provider_private_authentication_bearer_flow_model != False + + # Construct a model instance of ProviderPrivateAuthenticationBearerFlow by calling from_dict on the json representation + provider_private_authentication_bearer_flow_model_dict = ProviderPrivateAuthenticationBearerFlow.from_dict(provider_private_authentication_bearer_flow_model_json).__dict__ + provider_private_authentication_bearer_flow_model2 = ProviderPrivateAuthenticationBearerFlow(**provider_private_authentication_bearer_flow_model_dict) + + # Verify the model instances are equivalent + assert provider_private_authentication_bearer_flow_model == provider_private_authentication_bearer_flow_model2 + + # Convert model instance back to dict and verify no loss of data + provider_private_authentication_bearer_flow_model_json2 = provider_private_authentication_bearer_flow_model.to_dict() + assert provider_private_authentication_bearer_flow_model_json2 == provider_private_authentication_bearer_flow_model_json + + +class TestModel_ProviderPrivateAuthenticationOAuth2Flow: + """ + Test Class for ProviderPrivateAuthenticationOAuth2Flow + """ + + def test_provider_private_authentication_o_auth2_flow_serialization(self): + """ + Test serialization/deserialization for ProviderPrivateAuthenticationOAuth2Flow + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + provider_private_authentication_o_auth2_password_password_model = {} # ProviderPrivateAuthenticationOAuth2PasswordPassword + provider_private_authentication_o_auth2_password_password_model['type'] = 'value' + provider_private_authentication_o_auth2_password_password_model['value'] = 'testString' + + provider_private_authentication_o_auth2_flow_flows_model = {} # ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password + provider_private_authentication_o_auth2_flow_flows_model['client_id'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_model['client_secret'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_model['access_token'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_model['refresh_token'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_model['password'] = provider_private_authentication_o_auth2_password_password_model + + # Construct a json representation of a ProviderPrivateAuthenticationOAuth2Flow model + provider_private_authentication_o_auth2_flow_model_json = {} + provider_private_authentication_o_auth2_flow_model_json['flows'] = provider_private_authentication_o_auth2_flow_flows_model + + # Construct a model instance of ProviderPrivateAuthenticationOAuth2Flow by calling from_dict on the json representation + provider_private_authentication_o_auth2_flow_model = ProviderPrivateAuthenticationOAuth2Flow.from_dict(provider_private_authentication_o_auth2_flow_model_json) + assert provider_private_authentication_o_auth2_flow_model != False + + # Construct a model instance of ProviderPrivateAuthenticationOAuth2Flow by calling from_dict on the json representation + provider_private_authentication_o_auth2_flow_model_dict = ProviderPrivateAuthenticationOAuth2Flow.from_dict(provider_private_authentication_o_auth2_flow_model_json).__dict__ + provider_private_authentication_o_auth2_flow_model2 = ProviderPrivateAuthenticationOAuth2Flow(**provider_private_authentication_o_auth2_flow_model_dict) + + # Verify the model instances are equivalent + assert provider_private_authentication_o_auth2_flow_model == provider_private_authentication_o_auth2_flow_model2 + + # Convert model instance back to dict and verify no loss of data + provider_private_authentication_o_auth2_flow_model_json2 = provider_private_authentication_o_auth2_flow_model.to_dict() + assert provider_private_authentication_o_auth2_flow_model_json2 == provider_private_authentication_o_auth2_flow_model_json + + +class TestModel_ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode: + """ + Test Class for ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode + """ + + def test_provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_serialization(self): + """ + Test serialization/deserialization for ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode + """ + + # Construct a json representation of a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode model + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_json = {} + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_json['client_id'] = 'testString' + 
provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_json['client_secret'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_json['access_token'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_json['refresh_token'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_json['authorization_code'] = 'testString' + + # Construct a model instance of ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode by calling from_dict on the json representation + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode.from_dict(provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_json) + assert provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model != False + + # Construct a model instance of ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode by calling from_dict on the json representation + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_dict = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode.from_dict(provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_json).__dict__ + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model2 = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode(**provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_dict) + + # Verify the model instances are equivalent + assert provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model == provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model2 + + # Convert model instance back to dict and verify no loss of data + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_json2 = provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model.to_dict() + assert provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_json2 == provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_model_json + + +class TestModel_ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials: + """ + Test Class for ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials + """ + + def test_provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_serialization(self): + """ + Test serialization/deserialization for 
ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials + """ + + # Construct a json representation of a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials model + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_json = {} + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_json['client_id'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_json['client_secret'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_json['access_token'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_json['refresh_token'] = 'testString' + + # Construct a model instance of ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials by calling from_dict on the json representation + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials.from_dict(provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_json) + assert provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model != False + + # Construct a model instance of ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials by calling from_dict on the json representation + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_dict = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials.from_dict(provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_json).__dict__ + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model2 = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials(**provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_dict) + + # Verify the model instances are equivalent + assert provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model == provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model2 + + # Convert model instance back to dict and verify no loss of data + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_json2 = provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model.to_dict() + assert provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_json2 == provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_model_json + + +class TestModel_ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password: + """ + Test Class for 
ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password + """ + + def test_provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_serialization(self): + """ + Test serialization/deserialization for ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password + """ + + # Construct dict forms of any model objects needed in order to build this model. + + provider_private_authentication_o_auth2_password_password_model = {} # ProviderPrivateAuthenticationOAuth2PasswordPassword + provider_private_authentication_o_auth2_password_password_model['type'] = 'value' + provider_private_authentication_o_auth2_password_password_model['value'] = 'testString' + + # Construct a json representation of a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password model + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_json = {} + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_json['client_id'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_json['client_secret'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_json['access_token'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_json['refresh_token'] = 'testString' + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_json['password'] = provider_private_authentication_o_auth2_password_password_model + + # Construct a model instance of ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password by calling from_dict on the json representation + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password.from_dict(provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_json) + assert provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model != False + + # Construct a model instance of ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password by calling from_dict on the json representation + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_dict = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password.from_dict(provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_json).__dict__ + provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model2 = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password(**provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_dict) + + # Verify the model instances are equivalent + assert provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model == provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model2 + + # Convert model instance back to dict and verify no loss of data + 
provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_json2 = provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model.to_dict() + assert provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_json2 == provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeAudio: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeAudio + """ + + def test_runtime_response_generic_runtime_response_type_audio_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeAudio + """ + + # Construct dict forms of any model objects needed in order to build this model. + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeAudio model + runtime_response_generic_runtime_response_type_audio_model_json = {} + runtime_response_generic_runtime_response_type_audio_model_json['response_type'] = 'audio' + runtime_response_generic_runtime_response_type_audio_model_json['source'] = 'testString' + runtime_response_generic_runtime_response_type_audio_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_audio_model_json['description'] = 'testString' + runtime_response_generic_runtime_response_type_audio_model_json['channels'] = [response_generic_channel_model] + runtime_response_generic_runtime_response_type_audio_model_json['channel_options'] = {'anyKey': 'anyValue'} + runtime_response_generic_runtime_response_type_audio_model_json['alt_text'] = 'testString' + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeAudio by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_audio_model = RuntimeResponseGenericRuntimeResponseTypeAudio.from_dict(runtime_response_generic_runtime_response_type_audio_model_json) + assert runtime_response_generic_runtime_response_type_audio_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeAudio by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_audio_model_dict = RuntimeResponseGenericRuntimeResponseTypeAudio.from_dict(runtime_response_generic_runtime_response_type_audio_model_json).__dict__ + runtime_response_generic_runtime_response_type_audio_model2 = RuntimeResponseGenericRuntimeResponseTypeAudio(**runtime_response_generic_runtime_response_type_audio_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_audio_model == runtime_response_generic_runtime_response_type_audio_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_audio_model_json2 = runtime_response_generic_runtime_response_type_audio_model.to_dict() + assert runtime_response_generic_runtime_response_type_audio_model_json2 == runtime_response_generic_runtime_response_type_audio_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeChannelTransfer: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeChannelTransfer + """ + + def test_runtime_response_generic_runtime_response_type_channel_transfer_serialization(self): + 
""" + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeChannelTransfer + """ + + # Construct dict forms of any model objects needed in order to build this model. + + channel_transfer_target_chat_model = {} # ChannelTransferTargetChat + channel_transfer_target_chat_model['url'] = 'testString' + + channel_transfer_target_model = {} # ChannelTransferTarget + channel_transfer_target_model['chat'] = channel_transfer_target_chat_model + + channel_transfer_info_model = {} # ChannelTransferInfo + channel_transfer_info_model['target'] = channel_transfer_target_model + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeChannelTransfer model + runtime_response_generic_runtime_response_type_channel_transfer_model_json = {} + runtime_response_generic_runtime_response_type_channel_transfer_model_json['response_type'] = 'channel_transfer' + runtime_response_generic_runtime_response_type_channel_transfer_model_json['message_to_user'] = 'testString' + runtime_response_generic_runtime_response_type_channel_transfer_model_json['transfer_info'] = channel_transfer_info_model + runtime_response_generic_runtime_response_type_channel_transfer_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeChannelTransfer by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_channel_transfer_model = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer.from_dict(runtime_response_generic_runtime_response_type_channel_transfer_model_json) + assert runtime_response_generic_runtime_response_type_channel_transfer_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeChannelTransfer by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_channel_transfer_model_dict = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer.from_dict(runtime_response_generic_runtime_response_type_channel_transfer_model_json).__dict__ + runtime_response_generic_runtime_response_type_channel_transfer_model2 = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer(**runtime_response_generic_runtime_response_type_channel_transfer_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_channel_transfer_model == runtime_response_generic_runtime_response_type_channel_transfer_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_channel_transfer_model_json2 = runtime_response_generic_runtime_response_type_channel_transfer_model.to_dict() + assert runtime_response_generic_runtime_response_type_channel_transfer_model_json2 == runtime_response_generic_runtime_response_type_channel_transfer_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeConnectToAgent: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeConnectToAgent + """ + + def test_runtime_response_generic_runtime_response_type_connect_to_agent_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeConnectToAgent + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + agent_availability_message_model = {} # AgentAvailabilityMessage + agent_availability_message_model['message'] = 'testString' + + dialog_node_output_connect_to_agent_transfer_info_model = {} # DialogNodeOutputConnectToAgentTransferInfo + dialog_node_output_connect_to_agent_transfer_info_model['target'] = {'key1': {'anyKey': 'anyValue'}} + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeConnectToAgent model + runtime_response_generic_runtime_response_type_connect_to_agent_model_json = {} + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['response_type'] = 'connect_to_agent' + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['message_to_human_agent'] = 'testString' + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['agent_available'] = agent_availability_message_model + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['agent_unavailable'] = agent_availability_message_model + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['transfer_info'] = dialog_node_output_connect_to_agent_transfer_info_model + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['topic'] = 'testString' + runtime_response_generic_runtime_response_type_connect_to_agent_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeConnectToAgent by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_connect_to_agent_model = RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.from_dict(runtime_response_generic_runtime_response_type_connect_to_agent_model_json) + assert runtime_response_generic_runtime_response_type_connect_to_agent_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeConnectToAgent by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_connect_to_agent_model_dict = RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.from_dict(runtime_response_generic_runtime_response_type_connect_to_agent_model_json).__dict__ + runtime_response_generic_runtime_response_type_connect_to_agent_model2 = RuntimeResponseGenericRuntimeResponseTypeConnectToAgent(**runtime_response_generic_runtime_response_type_connect_to_agent_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_connect_to_agent_model == runtime_response_generic_runtime_response_type_connect_to_agent_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_connect_to_agent_model_json2 = runtime_response_generic_runtime_response_type_connect_to_agent_model.to_dict() + assert runtime_response_generic_runtime_response_type_connect_to_agent_model_json2 == runtime_response_generic_runtime_response_type_connect_to_agent_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeConversationalSearch: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + """ + + def test_runtime_response_generic_runtime_response_type_conversational_search_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + """ + + # Construct 
dict forms of any model objects needed in order to build this model. + + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeConversationalSearch model + runtime_response_generic_runtime_response_type_conversational_search_model_json = {} + runtime_response_generic_runtime_response_type_conversational_search_model_json['response_type'] = 'conversation_search' + runtime_response_generic_runtime_response_type_conversational_search_model_json['text'] = 'testString' + runtime_response_generic_runtime_response_type_conversational_search_model_json['citations_title'] = 'testString' + runtime_response_generic_runtime_response_type_conversational_search_model_json['citations'] = [response_generic_citation_model] + runtime_response_generic_runtime_response_type_conversational_search_model_json['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_runtime_response_type_conversational_search_model_json['response_length_option'] = 'testString' + runtime_response_generic_runtime_response_type_conversational_search_model_json['search_results'] = [search_results_model] + runtime_response_generic_runtime_response_type_conversational_search_model_json['disclaimer'] = 'testString' + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeConversationalSearch by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_conversational_search_model = RuntimeResponseGenericRuntimeResponseTypeConversationalSearch.from_dict(runtime_response_generic_runtime_response_type_conversational_search_model_json) + assert runtime_response_generic_runtime_response_type_conversational_search_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeConversationalSearch by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_conversational_search_model_dict = RuntimeResponseGenericRuntimeResponseTypeConversationalSearch.from_dict(runtime_response_generic_runtime_response_type_conversational_search_model_json).__dict__ + 
runtime_response_generic_runtime_response_type_conversational_search_model2 = RuntimeResponseGenericRuntimeResponseTypeConversationalSearch(**runtime_response_generic_runtime_response_type_conversational_search_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_conversational_search_model == runtime_response_generic_runtime_response_type_conversational_search_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_conversational_search_model_json2 = runtime_response_generic_runtime_response_type_conversational_search_model.to_dict() + assert runtime_response_generic_runtime_response_type_conversational_search_model_json2 == runtime_response_generic_runtime_response_type_conversational_search_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeDate: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeDate + """ + + def test_runtime_response_generic_runtime_response_type_date_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeDate + """ + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeDate model + runtime_response_generic_runtime_response_type_date_model_json = {} + runtime_response_generic_runtime_response_type_date_model_json['response_type'] = 'date' + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeDate by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_date_model = RuntimeResponseGenericRuntimeResponseTypeDate.from_dict(runtime_response_generic_runtime_response_type_date_model_json) + assert runtime_response_generic_runtime_response_type_date_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeDate by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_date_model_dict = RuntimeResponseGenericRuntimeResponseTypeDate.from_dict(runtime_response_generic_runtime_response_type_date_model_json).__dict__ + runtime_response_generic_runtime_response_type_date_model2 = RuntimeResponseGenericRuntimeResponseTypeDate(**runtime_response_generic_runtime_response_type_date_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_date_model == runtime_response_generic_runtime_response_type_date_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_date_model_json2 = runtime_response_generic_runtime_response_type_date_model.to_dict() + assert runtime_response_generic_runtime_response_type_date_model_json2 == runtime_response_generic_runtime_response_type_date_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeDtmf: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeDtmf + """ + + def test_runtime_response_generic_runtime_response_type_dtmf_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeDtmf + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + dtmf_command_info_model = {} # DtmfCommandInfo + dtmf_command_info_model['type'] = 'collect' + dtmf_command_info_model['parameters'] = {'anyKey': 'anyValue'} + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeDtmf model + runtime_response_generic_runtime_response_type_dtmf_model_json = {} + runtime_response_generic_runtime_response_type_dtmf_model_json['response_type'] = 'dtmf' + runtime_response_generic_runtime_response_type_dtmf_model_json['command_info'] = dtmf_command_info_model + runtime_response_generic_runtime_response_type_dtmf_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeDtmf by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_dtmf_model = RuntimeResponseGenericRuntimeResponseTypeDtmf.from_dict(runtime_response_generic_runtime_response_type_dtmf_model_json) + assert runtime_response_generic_runtime_response_type_dtmf_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeDtmf by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_dtmf_model_dict = RuntimeResponseGenericRuntimeResponseTypeDtmf.from_dict(runtime_response_generic_runtime_response_type_dtmf_model_json).__dict__ + runtime_response_generic_runtime_response_type_dtmf_model2 = RuntimeResponseGenericRuntimeResponseTypeDtmf(**runtime_response_generic_runtime_response_type_dtmf_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_dtmf_model == runtime_response_generic_runtime_response_type_dtmf_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_dtmf_model_json2 = runtime_response_generic_runtime_response_type_dtmf_model.to_dict() + assert runtime_response_generic_runtime_response_type_dtmf_model_json2 == runtime_response_generic_runtime_response_type_dtmf_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeEndSession: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeEndSession + """ + + def test_runtime_response_generic_runtime_response_type_end_session_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeEndSession + """ + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeEndSession model + runtime_response_generic_runtime_response_type_end_session_model_json = {} + runtime_response_generic_runtime_response_type_end_session_model_json['response_type'] = 'end_session' + runtime_response_generic_runtime_response_type_end_session_model_json['channel_options'] = {'anyKey': 'anyValue'} + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeEndSession by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_end_session_model = RuntimeResponseGenericRuntimeResponseTypeEndSession.from_dict(runtime_response_generic_runtime_response_type_end_session_model_json) + assert runtime_response_generic_runtime_response_type_end_session_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeEndSession by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_end_session_model_dict = 
RuntimeResponseGenericRuntimeResponseTypeEndSession.from_dict(runtime_response_generic_runtime_response_type_end_session_model_json).__dict__ + runtime_response_generic_runtime_response_type_end_session_model2 = RuntimeResponseGenericRuntimeResponseTypeEndSession(**runtime_response_generic_runtime_response_type_end_session_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_end_session_model == runtime_response_generic_runtime_response_type_end_session_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_end_session_model_json2 = runtime_response_generic_runtime_response_type_end_session_model.to_dict() + assert runtime_response_generic_runtime_response_type_end_session_model_json2 == runtime_response_generic_runtime_response_type_end_session_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeIframe: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeIframe + """ + + def test_runtime_response_generic_runtime_response_type_iframe_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeIframe + """ + + # Construct dict forms of any model objects needed in order to build this model. + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeIframe model + runtime_response_generic_runtime_response_type_iframe_model_json = {} + runtime_response_generic_runtime_response_type_iframe_model_json['response_type'] = 'iframe' + runtime_response_generic_runtime_response_type_iframe_model_json['source'] = 'testString' + runtime_response_generic_runtime_response_type_iframe_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_iframe_model_json['description'] = 'testString' + runtime_response_generic_runtime_response_type_iframe_model_json['image_url'] = 'testString' + runtime_response_generic_runtime_response_type_iframe_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeIframe by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_iframe_model = RuntimeResponseGenericRuntimeResponseTypeIframe.from_dict(runtime_response_generic_runtime_response_type_iframe_model_json) + assert runtime_response_generic_runtime_response_type_iframe_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeIframe by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_iframe_model_dict = RuntimeResponseGenericRuntimeResponseTypeIframe.from_dict(runtime_response_generic_runtime_response_type_iframe_model_json).__dict__ + runtime_response_generic_runtime_response_type_iframe_model2 = RuntimeResponseGenericRuntimeResponseTypeIframe(**runtime_response_generic_runtime_response_type_iframe_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_iframe_model == runtime_response_generic_runtime_response_type_iframe_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_iframe_model_json2 = runtime_response_generic_runtime_response_type_iframe_model.to_dict() + assert 
runtime_response_generic_runtime_response_type_iframe_model_json2 == runtime_response_generic_runtime_response_type_iframe_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeImage: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeImage + """ + + def test_runtime_response_generic_runtime_response_type_image_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeImage + """ + + # Construct dict forms of any model objects needed in order to build this model. + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeImage model + runtime_response_generic_runtime_response_type_image_model_json = {} + runtime_response_generic_runtime_response_type_image_model_json['response_type'] = 'image' + runtime_response_generic_runtime_response_type_image_model_json['source'] = 'testString' + runtime_response_generic_runtime_response_type_image_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_image_model_json['description'] = 'testString' + runtime_response_generic_runtime_response_type_image_model_json['channels'] = [response_generic_channel_model] + runtime_response_generic_runtime_response_type_image_model_json['alt_text'] = 'testString' + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeImage by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_image_model = RuntimeResponseGenericRuntimeResponseTypeImage.from_dict(runtime_response_generic_runtime_response_type_image_model_json) + assert runtime_response_generic_runtime_response_type_image_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeImage by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_image_model_dict = RuntimeResponseGenericRuntimeResponseTypeImage.from_dict(runtime_response_generic_runtime_response_type_image_model_json).__dict__ + runtime_response_generic_runtime_response_type_image_model2 = RuntimeResponseGenericRuntimeResponseTypeImage(**runtime_response_generic_runtime_response_type_image_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_image_model == runtime_response_generic_runtime_response_type_image_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_image_model_json2 = runtime_response_generic_runtime_response_type_image_model.to_dict() + assert runtime_response_generic_runtime_response_type_image_model_json2 == runtime_response_generic_runtime_response_type_image_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeOption: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeOption + """ + + def test_runtime_response_generic_runtime_response_type_option_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeOption + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # 
MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + message_input_model = {} # MessageInput + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + dialog_node_output_options_element_value_model = {} # DialogNodeOutputOptionsElementValue + dialog_node_output_options_element_value_model['input'] = message_input_model + + dialog_node_output_options_element_model = {} # DialogNodeOutputOptionsElement + dialog_node_output_options_element_model['label'] = 'testString' + dialog_node_output_options_element_model['value'] = dialog_node_output_options_element_value_model + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeOption model + runtime_response_generic_runtime_response_type_option_model_json = {} + runtime_response_generic_runtime_response_type_option_model_json['response_type'] = 'option' + runtime_response_generic_runtime_response_type_option_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_option_model_json['description'] = 'testString' + runtime_response_generic_runtime_response_type_option_model_json['preference'] = 'dropdown' + runtime_response_generic_runtime_response_type_option_model_json['options'] = [dialog_node_output_options_element_model] + runtime_response_generic_runtime_response_type_option_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeOption by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_option_model = RuntimeResponseGenericRuntimeResponseTypeOption.from_dict(runtime_response_generic_runtime_response_type_option_model_json) + assert runtime_response_generic_runtime_response_type_option_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeOption by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_option_model_dict = RuntimeResponseGenericRuntimeResponseTypeOption.from_dict(runtime_response_generic_runtime_response_type_option_model_json).__dict__ + runtime_response_generic_runtime_response_type_option_model2 = RuntimeResponseGenericRuntimeResponseTypeOption(**runtime_response_generic_runtime_response_type_option_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_option_model == runtime_response_generic_runtime_response_type_option_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_option_model_json2 = runtime_response_generic_runtime_response_type_option_model.to_dict() + 
assert runtime_response_generic_runtime_response_type_option_model_json2 == runtime_response_generic_runtime_response_type_option_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypePause: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypePause + """ + + def test_runtime_response_generic_runtime_response_type_pause_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypePause + """ + + # Construct dict forms of any model objects needed in order to build this model. + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypePause model + runtime_response_generic_runtime_response_type_pause_model_json = {} + runtime_response_generic_runtime_response_type_pause_model_json['response_type'] = 'pause' + runtime_response_generic_runtime_response_type_pause_model_json['time'] = 38 + runtime_response_generic_runtime_response_type_pause_model_json['typing'] = True + runtime_response_generic_runtime_response_type_pause_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypePause by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_pause_model = RuntimeResponseGenericRuntimeResponseTypePause.from_dict(runtime_response_generic_runtime_response_type_pause_model_json) + assert runtime_response_generic_runtime_response_type_pause_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypePause by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_pause_model_dict = RuntimeResponseGenericRuntimeResponseTypePause.from_dict(runtime_response_generic_runtime_response_type_pause_model_json).__dict__ + runtime_response_generic_runtime_response_type_pause_model2 = RuntimeResponseGenericRuntimeResponseTypePause(**runtime_response_generic_runtime_response_type_pause_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_pause_model == runtime_response_generic_runtime_response_type_pause_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_pause_model_json2 = runtime_response_generic_runtime_response_type_pause_model.to_dict() + assert runtime_response_generic_runtime_response_type_pause_model_json2 == runtime_response_generic_runtime_response_type_pause_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeSearch: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeSearch + """ + + def test_runtime_response_generic_runtime_response_type_search_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeSearch + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + search_result_metadata_model = {} # SearchResultMetadata + search_result_metadata_model['confidence'] = 72.5 + search_result_metadata_model['score'] = 72.5 + + search_result_highlight_model = {} # SearchResultHighlight + search_result_highlight_model['body'] = ['testString'] + search_result_highlight_model['title'] = ['testString'] + search_result_highlight_model['url'] = ['testString'] + search_result_highlight_model['foo'] = ['testString'] + + search_result_answer_model = {} # SearchResultAnswer + search_result_answer_model['text'] = 'testString' + search_result_answer_model['confidence'] = 0 + + search_result_model = {} # SearchResult + search_result_model['id'] = 'testString' + search_result_model['result_metadata'] = search_result_metadata_model + search_result_model['body'] = 'testString' + search_result_model['title'] = 'testString' + search_result_model['url'] = 'testString' + search_result_model['highlight'] = search_result_highlight_model + search_result_model['answers'] = [search_result_answer_model] + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeSearch model + runtime_response_generic_runtime_response_type_search_model_json = {} + runtime_response_generic_runtime_response_type_search_model_json['response_type'] = 'search' + runtime_response_generic_runtime_response_type_search_model_json['header'] = 'testString' + runtime_response_generic_runtime_response_type_search_model_json['primary_results'] = [search_result_model] + runtime_response_generic_runtime_response_type_search_model_json['additional_results'] = [search_result_model] + runtime_response_generic_runtime_response_type_search_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeSearch by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_search_model = RuntimeResponseGenericRuntimeResponseTypeSearch.from_dict(runtime_response_generic_runtime_response_type_search_model_json) + assert runtime_response_generic_runtime_response_type_search_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeSearch by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_search_model_dict = RuntimeResponseGenericRuntimeResponseTypeSearch.from_dict(runtime_response_generic_runtime_response_type_search_model_json).__dict__ + runtime_response_generic_runtime_response_type_search_model2 = RuntimeResponseGenericRuntimeResponseTypeSearch(**runtime_response_generic_runtime_response_type_search_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_search_model == runtime_response_generic_runtime_response_type_search_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_search_model_json2 = runtime_response_generic_runtime_response_type_search_model.to_dict() + assert runtime_response_generic_runtime_response_type_search_model_json2 == runtime_response_generic_runtime_response_type_search_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeSuggestion: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeSuggestion + """ + + def test_runtime_response_generic_runtime_response_type_suggestion_serialization(self): + """ + Test 
serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeSuggestion + """ + + # Construct dict forms of any model objects needed in order to build this model. + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + message_input_attachment_model = {} # MessageInputAttachment + message_input_attachment_model['url'] = 'testString' + message_input_attachment_model['media_type'] = 'testString' + + request_analytics_model = {} # RequestAnalytics + request_analytics_model['browser'] = 'testString' + request_analytics_model['device'] = 'testString' + request_analytics_model['pageUrl'] = 'testString' + + message_input_options_spelling_model = {} # MessageInputOptionsSpelling + 
message_input_options_spelling_model['suggestions'] = True + message_input_options_spelling_model['auto_correct'] = True + + message_input_options_model = {} # MessageInputOptions + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False + message_input_options_model['async_callout'] = False + message_input_options_model['spelling'] = message_input_options_spelling_model + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False + + message_input_model = {} # MessageInput + message_input_model['message_type'] = 'text' + message_input_model['text'] = 'testString' + message_input_model['intents'] = [runtime_intent_model] + message_input_model['entities'] = [runtime_entity_model] + message_input_model['suggestion_id'] = 'testString' + message_input_model['attachments'] = [message_input_attachment_model] + message_input_model['analytics'] = request_analytics_model + message_input_model['options'] = message_input_options_model + + dialog_suggestion_value_model = {} # DialogSuggestionValue + dialog_suggestion_value_model['input'] = message_input_model + + dialog_suggestion_model = {} # DialogSuggestion + dialog_suggestion_model['label'] = 'testString' + dialog_suggestion_model['value'] = dialog_suggestion_value_model + dialog_suggestion_model['output'] = {'anyKey': 'anyValue'} + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeSuggestion model + runtime_response_generic_runtime_response_type_suggestion_model_json = {} + runtime_response_generic_runtime_response_type_suggestion_model_json['response_type'] = 'suggestion' + runtime_response_generic_runtime_response_type_suggestion_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_suggestion_model_json['suggestions'] = [dialog_suggestion_model] + runtime_response_generic_runtime_response_type_suggestion_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeSuggestion by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_suggestion_model = RuntimeResponseGenericRuntimeResponseTypeSuggestion.from_dict(runtime_response_generic_runtime_response_type_suggestion_model_json) + assert runtime_response_generic_runtime_response_type_suggestion_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeSuggestion by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_suggestion_model_dict = RuntimeResponseGenericRuntimeResponseTypeSuggestion.from_dict(runtime_response_generic_runtime_response_type_suggestion_model_json).__dict__ + runtime_response_generic_runtime_response_type_suggestion_model2 = RuntimeResponseGenericRuntimeResponseTypeSuggestion(**runtime_response_generic_runtime_response_type_suggestion_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_suggestion_model == runtime_response_generic_runtime_response_type_suggestion_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_suggestion_model_json2 = runtime_response_generic_runtime_response_type_suggestion_model.to_dict() + assert 
runtime_response_generic_runtime_response_type_suggestion_model_json2 == runtime_response_generic_runtime_response_type_suggestion_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeText: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeText + """ + + def test_runtime_response_generic_runtime_response_type_text_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeText + """ + + # Construct dict forms of any model objects needed in order to build this model. + + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeText model + runtime_response_generic_runtime_response_type_text_model_json = {} + runtime_response_generic_runtime_response_type_text_model_json['response_type'] = 'text' + runtime_response_generic_runtime_response_type_text_model_json['text'] = 'testString' + runtime_response_generic_runtime_response_type_text_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeText by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_text_model = RuntimeResponseGenericRuntimeResponseTypeText.from_dict(runtime_response_generic_runtime_response_type_text_model_json) + assert runtime_response_generic_runtime_response_type_text_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeText by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_text_model_dict = RuntimeResponseGenericRuntimeResponseTypeText.from_dict(runtime_response_generic_runtime_response_type_text_model_json).__dict__ + runtime_response_generic_runtime_response_type_text_model2 = RuntimeResponseGenericRuntimeResponseTypeText(**runtime_response_generic_runtime_response_type_text_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_text_model == runtime_response_generic_runtime_response_type_text_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_text_model_json2 = runtime_response_generic_runtime_response_type_text_model.to_dict() + assert runtime_response_generic_runtime_response_type_text_model_json2 == runtime_response_generic_runtime_response_type_text_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeUserDefined: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeUserDefined + """ + + def test_runtime_response_generic_runtime_response_type_user_defined_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeUserDefined + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeUserDefined model + runtime_response_generic_runtime_response_type_user_defined_model_json = {} + runtime_response_generic_runtime_response_type_user_defined_model_json['response_type'] = 'user_defined' + runtime_response_generic_runtime_response_type_user_defined_model_json['user_defined'] = {'anyKey': 'anyValue'} + runtime_response_generic_runtime_response_type_user_defined_model_json['channels'] = [response_generic_channel_model] + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeUserDefined by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_user_defined_model = RuntimeResponseGenericRuntimeResponseTypeUserDefined.from_dict(runtime_response_generic_runtime_response_type_user_defined_model_json) + assert runtime_response_generic_runtime_response_type_user_defined_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeUserDefined by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_user_defined_model_dict = RuntimeResponseGenericRuntimeResponseTypeUserDefined.from_dict(runtime_response_generic_runtime_response_type_user_defined_model_json).__dict__ + runtime_response_generic_runtime_response_type_user_defined_model2 = RuntimeResponseGenericRuntimeResponseTypeUserDefined(**runtime_response_generic_runtime_response_type_user_defined_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_user_defined_model == runtime_response_generic_runtime_response_type_user_defined_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_user_defined_model_json2 = runtime_response_generic_runtime_response_type_user_defined_model.to_dict() + assert runtime_response_generic_runtime_response_type_user_defined_model_json2 == runtime_response_generic_runtime_response_type_user_defined_model_json + + +class TestModel_RuntimeResponseGenericRuntimeResponseTypeVideo: + """ + Test Class for RuntimeResponseGenericRuntimeResponseTypeVideo + """ + + def test_runtime_response_generic_runtime_response_type_video_serialization(self): + """ + Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeVideo + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_channel_model = {} # ResponseGenericChannel + response_generic_channel_model['channel'] = 'testString' + + # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeVideo model + runtime_response_generic_runtime_response_type_video_model_json = {} + runtime_response_generic_runtime_response_type_video_model_json['response_type'] = 'video' + runtime_response_generic_runtime_response_type_video_model_json['source'] = 'testString' + runtime_response_generic_runtime_response_type_video_model_json['title'] = 'testString' + runtime_response_generic_runtime_response_type_video_model_json['description'] = 'testString' + runtime_response_generic_runtime_response_type_video_model_json['channels'] = [response_generic_channel_model] + runtime_response_generic_runtime_response_type_video_model_json['channel_options'] = {'anyKey': 'anyValue'} + runtime_response_generic_runtime_response_type_video_model_json['alt_text'] = 'testString' + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeVideo by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_video_model = RuntimeResponseGenericRuntimeResponseTypeVideo.from_dict(runtime_response_generic_runtime_response_type_video_model_json) + assert runtime_response_generic_runtime_response_type_video_model != False + + # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeVideo by calling from_dict on the json representation + runtime_response_generic_runtime_response_type_video_model_dict = RuntimeResponseGenericRuntimeResponseTypeVideo.from_dict(runtime_response_generic_runtime_response_type_video_model_json).__dict__ + runtime_response_generic_runtime_response_type_video_model2 = RuntimeResponseGenericRuntimeResponseTypeVideo(**runtime_response_generic_runtime_response_type_video_model_dict) + + # Verify the model instances are equivalent + assert runtime_response_generic_runtime_response_type_video_model == runtime_response_generic_runtime_response_type_video_model2 + + # Convert model instance back to dict and verify no loss of data + runtime_response_generic_runtime_response_type_video_model_json2 = runtime_response_generic_runtime_response_type_video_model.to_dict() + assert runtime_response_generic_runtime_response_type_video_model_json2 == runtime_response_generic_runtime_response_type_video_model_json + + +class TestModel_StatelessMessageStreamResponseMessageStreamCompleteItem: + """ + Test Class for StatelessMessageStreamResponseMessageStreamCompleteItem + """ + + def test_stateless_message_stream_response_message_stream_complete_item_serialization(self): + """ + Test serialization/deserialization for StatelessMessageStreamResponseMessageStreamCompleteItem + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + metadata_model = {} # Metadata + metadata_model['id'] = 38 + + complete_item_model = {} # CompleteItem + complete_item_model['streaming_metadata'] = metadata_model + + # Construct a json representation of a StatelessMessageStreamResponseMessageStreamCompleteItem model + stateless_message_stream_response_message_stream_complete_item_model_json = {} + stateless_message_stream_response_message_stream_complete_item_model_json['complete_item'] = complete_item_model + + # Construct a model instance of StatelessMessageStreamResponseMessageStreamCompleteItem by calling from_dict on the json representation + stateless_message_stream_response_message_stream_complete_item_model = StatelessMessageStreamResponseMessageStreamCompleteItem.from_dict(stateless_message_stream_response_message_stream_complete_item_model_json) + assert stateless_message_stream_response_message_stream_complete_item_model != False + + # Construct a model instance of StatelessMessageStreamResponseMessageStreamCompleteItem by calling from_dict on the json representation + stateless_message_stream_response_message_stream_complete_item_model_dict = StatelessMessageStreamResponseMessageStreamCompleteItem.from_dict(stateless_message_stream_response_message_stream_complete_item_model_json).__dict__ + stateless_message_stream_response_message_stream_complete_item_model2 = StatelessMessageStreamResponseMessageStreamCompleteItem(**stateless_message_stream_response_message_stream_complete_item_model_dict) + + # Verify the model instances are equivalent + assert stateless_message_stream_response_message_stream_complete_item_model == stateless_message_stream_response_message_stream_complete_item_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_message_stream_response_message_stream_complete_item_model_json2 = stateless_message_stream_response_message_stream_complete_item_model.to_dict() + assert stateless_message_stream_response_message_stream_complete_item_model_json2 == stateless_message_stream_response_message_stream_complete_item_model_json + + +class TestModel_StatelessMessageStreamResponseMessageStreamPartialItem: + """ + Test Class for StatelessMessageStreamResponseMessageStreamPartialItem + """ + + def test_stateless_message_stream_response_message_stream_partial_item_serialization(self): + """ + Test serialization/deserialization for StatelessMessageStreamResponseMessageStreamPartialItem + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + metadata_model = {} # Metadata + metadata_model['id'] = 38 + + partial_item_model = {} # PartialItem + partial_item_model['response_type'] = 'testString' + partial_item_model['text'] = 'testString' + partial_item_model['streaming_metadata'] = metadata_model + + # Construct a json representation of a StatelessMessageStreamResponseMessageStreamPartialItem model + stateless_message_stream_response_message_stream_partial_item_model_json = {} + stateless_message_stream_response_message_stream_partial_item_model_json['partial_item'] = partial_item_model + + # Construct a model instance of StatelessMessageStreamResponseMessageStreamPartialItem by calling from_dict on the json representation + stateless_message_stream_response_message_stream_partial_item_model = StatelessMessageStreamResponseMessageStreamPartialItem.from_dict(stateless_message_stream_response_message_stream_partial_item_model_json) + assert stateless_message_stream_response_message_stream_partial_item_model != False + + # Construct a model instance of StatelessMessageStreamResponseMessageStreamPartialItem by calling from_dict on the json representation + stateless_message_stream_response_message_stream_partial_item_model_dict = StatelessMessageStreamResponseMessageStreamPartialItem.from_dict(stateless_message_stream_response_message_stream_partial_item_model_json).__dict__ + stateless_message_stream_response_message_stream_partial_item_model2 = StatelessMessageStreamResponseMessageStreamPartialItem(**stateless_message_stream_response_message_stream_partial_item_model_dict) + + # Verify the model instances are equivalent + assert stateless_message_stream_response_message_stream_partial_item_model == stateless_message_stream_response_message_stream_partial_item_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_message_stream_response_message_stream_partial_item_model_json2 = stateless_message_stream_response_message_stream_partial_item_model.to_dict() + assert stateless_message_stream_response_message_stream_partial_item_model_json2 == stateless_message_stream_response_message_stream_partial_item_model_json + + +class TestModel_StatelessMessageStreamResponseStatelessMessageStreamFinalResponse: + """ + Test Class for StatelessMessageStreamResponseStatelessMessageStreamFinalResponse + """ + + def test_stateless_message_stream_response_stateless_message_stream_final_response_serialization(self): + """ + Test serialization/deserialization for StatelessMessageStreamResponseStatelessMessageStreamFinalResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem + response_generic_citation_ranges_item_model['start'] = 38 + response_generic_citation_ranges_item_model['end'] = 38 + + response_generic_citation_model = {} # ResponseGenericCitation + response_generic_citation_model['title'] = 'testString' + response_generic_citation_model['text'] = 'testString' + response_generic_citation_model['body'] = 'testString' + response_generic_citation_model['search_result_index'] = 38 + response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model] + + response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores + response_generic_confidence_scores_model['threshold'] = 72.5 + response_generic_confidence_scores_model['pre_gen'] = 72.5 + response_generic_confidence_scores_model['post_gen'] = 72.5 + response_generic_confidence_scores_model['extractiveness'] = 72.5 + + search_results_result_metadata_model = {} # SearchResultsResultMetadata + search_results_result_metadata_model['document_retrieval_source'] = 'testString' + search_results_result_metadata_model['score'] = 38 + + search_results_model = {} # SearchResults + search_results_model['result_metadata'] = search_results_result_metadata_model + search_results_model['id'] = 'testString' + search_results_model['title'] = 'testString' + search_results_model['body'] = 'testString' + + runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch + runtime_response_generic_model['response_type'] = 'conversation_search' + runtime_response_generic_model['text'] = 'testString' + runtime_response_generic_model['citations_title'] = 'testString' + runtime_response_generic_model['citations'] = [response_generic_citation_model] + runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model + runtime_response_generic_model['response_length_option'] = 'testString' + runtime_response_generic_model['search_results'] = [search_results_model] + runtime_response_generic_model['disclaimer'] = 'testString' + + runtime_intent_model = {} # RuntimeIntent + runtime_intent_model['intent'] = 'testString' + runtime_intent_model['confidence'] = 72.5 + runtime_intent_model['skill'] = 'testString' + + capture_group_model = {} # CaptureGroup + capture_group_model['group'] = 'testString' + capture_group_model['location'] = [38] + + runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation + runtime_entity_interpretation_model['calendar_type'] = 'testString' + runtime_entity_interpretation_model['datetime_link'] = 'testString' + runtime_entity_interpretation_model['festival'] = 'testString' + runtime_entity_interpretation_model['granularity'] = 'day' + runtime_entity_interpretation_model['range_link'] = 'testString' + runtime_entity_interpretation_model['range_modifier'] = 'testString' + runtime_entity_interpretation_model['relative_day'] = 72.5 + runtime_entity_interpretation_model['relative_month'] = 72.5 + runtime_entity_interpretation_model['relative_week'] = 72.5 + runtime_entity_interpretation_model['relative_weekend'] = 72.5 + runtime_entity_interpretation_model['relative_year'] = 72.5 + runtime_entity_interpretation_model['specific_day'] = 72.5 + runtime_entity_interpretation_model['specific_day_of_week'] = 'testString' + runtime_entity_interpretation_model['specific_month'] = 72.5 + runtime_entity_interpretation_model['specific_quarter'] = 72.5 + runtime_entity_interpretation_model['specific_year'] = 72.5 + 
runtime_entity_interpretation_model['numeric_value'] = 72.5 + runtime_entity_interpretation_model['subtype'] = 'testString' + runtime_entity_interpretation_model['part_of_day'] = 'testString' + runtime_entity_interpretation_model['relative_hour'] = 72.5 + runtime_entity_interpretation_model['relative_minute'] = 72.5 + runtime_entity_interpretation_model['relative_second'] = 72.5 + runtime_entity_interpretation_model['specific_hour'] = 72.5 + runtime_entity_interpretation_model['specific_minute'] = 72.5 + runtime_entity_interpretation_model['specific_second'] = 72.5 + runtime_entity_interpretation_model['timezone'] = 'testString' + + runtime_entity_alternative_model = {} # RuntimeEntityAlternative + runtime_entity_alternative_model['value'] = 'testString' + runtime_entity_alternative_model['confidence'] = 72.5 + + runtime_entity_role_model = {} # RuntimeEntityRole + runtime_entity_role_model['type'] = 'date_from' + + runtime_entity_model = {} # RuntimeEntity + runtime_entity_model['entity'] = 'testString' + runtime_entity_model['location'] = [38] + runtime_entity_model['value'] = 'testString' + runtime_entity_model['confidence'] = 72.5 + runtime_entity_model['groups'] = [capture_group_model] + runtime_entity_model['interpretation'] = runtime_entity_interpretation_model + runtime_entity_model['alternatives'] = [runtime_entity_alternative_model] + runtime_entity_model['role'] = runtime_entity_role_model + runtime_entity_model['skill'] = 'testString' + + dialog_node_action_model = {} # DialogNodeAction + dialog_node_action_model['name'] = 'testString' + dialog_node_action_model['type'] = 'client' + dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'} + dialog_node_action_model['result_variable'] = 'testString' + dialog_node_action_model['credentials'] = 'testString' + + dialog_node_visited_model = {} # DialogNodeVisited + dialog_node_visited_model['dialog_node'] = 'testString' + dialog_node_visited_model['title'] = 'testString' + dialog_node_visited_model['conditions'] = 'testString' + + log_message_source_model = {} # LogMessageSourceDialogNode + log_message_source_model['type'] = 'dialog_node' + log_message_source_model['dialog_node'] = 'testString' + + dialog_log_message_model = {} # DialogLogMessage + dialog_log_message_model['level'] = 'info' + dialog_log_message_model['message'] = 'testString' + dialog_log_message_model['code'] = 'testString' + dialog_log_message_model['source'] = log_message_source_model + + turn_event_action_source_model = {} # TurnEventActionSource + turn_event_action_source_model['type'] = 'action' + turn_event_action_source_model['action'] = 'testString' + turn_event_action_source_model['action_title'] = 'testString' + turn_event_action_source_model['condition'] = 'testString' + + message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited + message_output_debug_turn_event_model['event'] = 'action_visited' + message_output_debug_turn_event_model['source'] = turn_event_action_source_model + message_output_debug_turn_event_model['action_start_time'] = 'testString' + message_output_debug_turn_event_model['condition_type'] = 'user_defined' + message_output_debug_turn_event_model['reason'] = 'intent' + message_output_debug_turn_event_model['result_variable'] = 'testString' + + message_output_debug_model = {} # MessageOutputDebug + message_output_debug_model['nodes_visited'] = [dialog_node_visited_model] + message_output_debug_model['log_messages'] = [dialog_log_message_model] + message_output_debug_model['branch_exited'] = 
True + message_output_debug_model['branch_exited_reason'] = 'completed' + message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model] + + message_output_spelling_model = {} # MessageOutputSpelling + message_output_spelling_model['text'] = 'testString' + message_output_spelling_model['original_text'] = 'testString' + message_output_spelling_model['suggested_text'] = 'testString' + + message_output_llm_metadata_model = {} # MessageOutputLLMMetadata + message_output_llm_metadata_model['task'] = 'testString' + message_output_llm_metadata_model['model_id'] = 'testString' + + message_context_global_system_model = {} # MessageContextGlobalSystem + message_context_global_system_model['timezone'] = 'testString' + message_context_global_system_model['user_id'] = 'testString' + message_context_global_system_model['turn_count'] = 38 + message_context_global_system_model['locale'] = 'en-us' + message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' + message_context_global_system_model['skip_user_input'] = True + + stateless_message_context_global_model = {} # StatelessMessageContextGlobal + stateless_message_context_global_model['system'] = message_context_global_system_model + stateless_message_context_global_model['session_id'] = 'testString' + + message_context_skill_system_model = {} # MessageContextSkillSystem + message_context_skill_system_model['state'] = 'testString' + message_context_skill_system_model['foo'] = 'testString' + + message_context_dialog_skill_model = {} # MessageContextDialogSkill + message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'} + message_context_dialog_skill_model['system'] = message_context_skill_system_model + + stateless_message_context_skills_actions_skill_model = {} # StatelessMessageContextSkillsActionsSkill + stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model + stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'} + stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'} + + stateless_message_context_skills_model = {} # StatelessMessageContextSkills + stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model + stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model + + stateless_message_context_model = {} # StatelessMessageContext + stateless_message_context_model['global'] = stateless_message_context_global_model + stateless_message_context_model['skills'] = stateless_message_context_skills_model + stateless_message_context_model['integrations'] = {'anyKey': 'anyValue'} + + stateless_final_response_output_model = {} # StatelessFinalResponseOutput + stateless_final_response_output_model['generic'] = [runtime_response_generic_model] + stateless_final_response_output_model['intents'] = [runtime_intent_model] + stateless_final_response_output_model['entities'] = [runtime_entity_model] + stateless_final_response_output_model['actions'] = 
[dialog_node_action_model] + stateless_final_response_output_model['debug'] = message_output_debug_model + stateless_final_response_output_model['user_defined'] = {'anyKey': 'anyValue'} + stateless_final_response_output_model['spelling'] = message_output_spelling_model + stateless_final_response_output_model['llm_metadata'] = [message_output_llm_metadata_model] + stateless_final_response_output_model['streaming_metadata'] = stateless_message_context_model + + stateless_final_response_model = {} # StatelessFinalResponse + stateless_final_response_model['output'] = stateless_final_response_output_model + stateless_final_response_model['context'] = stateless_message_context_model + stateless_final_response_model['user_id'] = 'testString' + + # Construct a json representation of a StatelessMessageStreamResponseStatelessMessageStreamFinalResponse model + stateless_message_stream_response_stateless_message_stream_final_response_model_json = {} + stateless_message_stream_response_stateless_message_stream_final_response_model_json['final_response'] = stateless_final_response_model + + # Construct a model instance of StatelessMessageStreamResponseStatelessMessageStreamFinalResponse by calling from_dict on the json representation + stateless_message_stream_response_stateless_message_stream_final_response_model = StatelessMessageStreamResponseStatelessMessageStreamFinalResponse.from_dict(stateless_message_stream_response_stateless_message_stream_final_response_model_json) + assert stateless_message_stream_response_stateless_message_stream_final_response_model != False + + # Construct a model instance of StatelessMessageStreamResponseStatelessMessageStreamFinalResponse by calling from_dict on the json representation + stateless_message_stream_response_stateless_message_stream_final_response_model_dict = StatelessMessageStreamResponseStatelessMessageStreamFinalResponse.from_dict(stateless_message_stream_response_stateless_message_stream_final_response_model_json).__dict__ + stateless_message_stream_response_stateless_message_stream_final_response_model2 = StatelessMessageStreamResponseStatelessMessageStreamFinalResponse(**stateless_message_stream_response_stateless_message_stream_final_response_model_dict) + + # Verify the model instances are equivalent + assert stateless_message_stream_response_stateless_message_stream_final_response_model == stateless_message_stream_response_stateless_message_stream_final_response_model2 + + # Convert model instance back to dict and verify no loss of data + stateless_message_stream_response_stateless_message_stream_final_response_model_json2 = stateless_message_stream_response_stateless_message_stream_final_response_model.to_dict() + assert stateless_message_stream_response_stateless_message_stream_final_response_model_json2 == stateless_message_stream_response_stateless_message_stream_final_response_model_json + + +# endregion +############################################################################## +# End of Model Tests +############################################################################## diff --git a/test/unit/test_authorization_v1.py b/test/unit/test_authorization_v1.py deleted file mode 100644 index 0b2125c84..000000000 --- a/test/unit/test_authorization_v1.py +++ /dev/null @@ -1,16 +0,0 @@ -# coding: utf-8 -import responses -import ibm_watson - - -@responses.activate -def test_request_token(): - url = 'https://stream.watsonplatform.net/authorization/api/v1/token?url=https://stream.watsonplatform.net/speech-to-text/api' - 
responses.add(responses.GET, - url=url, - body=b'mocked token', - status=200) - authorization = ibm_watson.AuthorizationV1(username='xxx', password='yyy') - authorization.get_token(url=ibm_watson.SpeechToTextV1.default_url) - assert responses.calls[0].request.url == url - assert responses.calls[0].response.content.decode('utf-8') == 'mocked token' diff --git a/test/unit/test_common.py b/test/unit/test_common.py index a2553e86f..62e0bbf6b 100644 --- a/test/unit/test_common.py +++ b/test/unit/test_common.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2019 IBM All Rights Reserved. +# (C) Copyright IBM Corp. 2019, 2020. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,11 +17,16 @@ from ibm_watson import get_sdk_headers import unittest + class TestCommon(unittest.TestCase): + def test_get_sdk_headers(self): headers = get_sdk_headers('my_service', 'v1', 'my_operation') self.assertIsNotNone(headers) self.assertIsNotNone(headers.get('X-IBMCloud-SDK-Analytics')) self.assertIsNotNone(headers.get('User-Agent')) self.assertIn('watson-apis-python-sdk', headers.get('User-Agent')) - self.assertEqual(headers.get('X-IBMCloud-SDK-Analytics'), 'service_name=my_service;service_version=v1;operation_id=my_operation') + self.assertEqual( + headers.get('X-IBMCloud-SDK-Analytics'), + 'service_name=my_service;service_version=v1;operation_id=my_operation' + ) diff --git a/test/unit/test_compare_comply_v1.py b/test/unit/test_compare_comply_v1.py deleted file mode 100644 index 1252a7a5e..000000000 --- a/test/unit/test_compare_comply_v1.py +++ /dev/null @@ -1,556 +0,0 @@ -# coding: utf-8 -import responses -import ibm_watson -import json -import os - -from unittest import TestCase - -base_url = "https://gateway.watsonplatform.net/compare-comply/api" -feedback = { - "comment": "test commment", - "user_id": "wonder woman", - "feedback_id": "lala", - "feedback_data": { - "model_id": "contracts", - "original_labels": { - "categories": [ - { - "modification": "unchanged", - "provenance_ids": [], - "label": "Responsibilities" - }, - { - "modification": "removed", - "provenance_ids": [], - "label": "Amendments" - } - ], - "types": [ - { - "modification": "unchanged", - "provenance_ids": [ - "111", - "2222" - ], - "label": { - "party": "IBM", - "nature": "Obligation" - } - }, - { - "modification": "removed", - "provenance_ids": [ - "111", - "2222" - ], - "label": { - "party": "Exclusion", - "nature": "End User" - } - } - ] - }, - "text": "1. 
IBM will provide a Senior Managing Consultant / expert resource, for up to 80 hours, to assist Florida Power & Light (FPL) with the creation of an IT infrastructure unit cost model for existing infrastructure.", - "feedback_type": "element_classification", - "updated_labels": { - "categories": [ - { - "modification": "unchanged", - "label": "Responsibilities" - }, - { - "modification": "added", - "label": "Audits" - } - ], - "types": [ - { - "modification": "unchanged", - "label": { - "party": "IBM", - "nature": "Obligation" - } - }, - { - "modification": "added", - "label": { - "party": "Buyer", - "nature": "Disclaimer" - } - } - ] - }, - "model_version": "11.00", - "location": { - "begin": "214", - "end": "237" - }, - "document": { - "hash": "", - "title": "doc title" - } - }, - "created": "2018-11-16T22:57:14+0000" -} - - -batch = { - "function": "html_conversion", - "status": "completed", - "updated": "2018-11-12T21:02:43.867+0000", - "document_counts": { - "successful": 4, - "failed": 0, - "total": 4, - "pending": 0 - }, - "created": "2018-11-12T21:02:38.907+0000", - "input_bucket_location": "us-south", - "input_bucket_name": "compare-comply-integration-test-bucket-input", - "batch_id": "xxx", - "output_bucket_name": "compare-comply-integration-test-bucket-output", - "model": "contracts", - "output_bucket_location": "us-south" -} - -class TestCompareComplyV1(TestCase): - - @classmethod - def setUp(cls): - iam_url = "https://iam.cloud.ibm.com/identity/token" - iam_token_response = """{ - "access_token": "oAeisG8yqPY7sFR_x66Z15", - "token_type": "Bearer", - "expires_in": 3600, - "expiration": 1524167011, - "refresh_token": "jy4gl91BQ" - }""" - responses.add( - responses.POST, url=iam_url, body=iam_token_response, status=200) - - @responses.activate - def test_convert_to_html(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = "{0}{1}".format(base_url, '/v1/html_conversion') - - response = { - "hash": "0d9589556c16fca21c64ce9c8b10d065", - "html": "", - "num_pages": "4", - "publication_date": "2018-11-10", - "title": "Microsoft Word - contract_A.doc" - } - - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - - with open( - os.path.join(os.path.dirname(__file__), - '../../resources/contract_A.pdf'), 'rb') as file: - service.convert_to_html( - file, - model_id="contracts", - file_content_type="application/octet-stream") - - assert len(responses.calls) == 2 - - @responses.activate - def test_classify_elements(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = "{0}{1}".format(base_url, '/v1/element_classification') - - response = [{ - "text": - "__November 9, 2018______________ date", - "categories": [], - "location": { - "begin": 19373, - "end": 19410 - }, - "types": [], - "attributes": [{ - "text": "November 9, 2018", - "type": "DateTime", - "location": { - "begin": 19375, - "end": 19391 - } - }] - }] - - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), - '../../resources/contract_A.pdf'), 'rb') as file: - service.classify_elements( - file, - model_id="contracts", - file_content_type="application/octet-stream") - - assert len(responses.calls) == 2 - - @responses.activate - def test_extract_tables(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = 
"{0}{1}".format(base_url, '/v1/tables') - - response = { - "model_version": - "0.2.8-SNAPSHOT", - "model_id": - "tables", - "document": { - "hash": "0906a4721a59ffeaf2ec12997aa4f7f7", - "title": "Design and build accessible PDF tables, sample tables" - }, - "tables": [{ - "section_title": { - "text": "Sample tables ", - "location": { - "begin": 2099, - "end": 2113 - } - }, - "text": - "Column header (TH) Column header (TH) Column header (TH) Row header (TH) Data cell (TD) Data cell (TD) Row header(TH) Data cell (TD) Data cell (TD) ", - "table_headers": [], - "row_headers": [], - "location": { - "begin": 2832, - "end": 4801 - }, - "body_cells": [], - }] - } - - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), - '../../resources/sample-tables.pdf'), 'rb') as file: - service.extract_tables(file) - - assert len(responses.calls) == 2 - - @responses.activate - def test_compare_documents(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = "{0}{1}".format(base_url, '/v1/comparison') - - response = { - "aligned_elements": [ - { - "element_pair": [{ - "text": - "WITNESSETH: that the Owner and Contractor undertake and agree as follows:", - "types": [], - "document_label": - "file_1", - "attributes": [], - "categories": [], - "location": { - "begin": 3845, - "end": 4085 - } - }, { - "text": - "WITNESSETH: that the Owner and Contractor undertake and agree as follows:", - "types": [], - "document_label": - "file_2", - "attributes": [], - "categories": [], - "location": { - "begin": 3846, - "end": 4086 - } - }], - "provenance_ids": - ["1mSG/96z1wY4De35LAExJzhCo2t0DfvbYnTl+vbavjY="], - }, - ], - "model_id": - "contracts", - "model_version": - "1.0.0" - } - - responses.add( - responses.POST, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), - '../../resources/contract_A.pdf'), 'rb') as file1: - with open(os.path.join(os.path.dirname(__file__), - '../../resources/contract_B.pdf'), 'rb') as file2: - service.compare_documents(file1, file2) - - assert len(responses.calls) == 2 - - @responses.activate - def test_add_feedback(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = "{0}{1}".format(base_url, '/v1/feedback') - - feedback_data = { - "feedback_type": "element_classification", - "document": { - "hash": "", - "title": "doc title" - }, - "model_id": "contracts", - "model_version": "11.00", - "location": { - "begin": "214", - "end": "237" - }, - "text": "1. 
IBM will provide a Senior Managing Consultant / expert resource, for up to 80 hours, to assist Florida Power & Light (FPL) with the creation of an IT infrastructure unit cost model for existing infrastructure.", - "original_labels": { - "types": [ - { - "label": { - "nature": "Obligation", - "party": "IBM" - }, - "provenance_ids": [ - "85f5981a-ba91-44f5-9efa-0bd22e64b7bc", - "ce0480a1-5ef1-4c3e-9861-3743b5610795" - ] - }, - { - "label": { - "nature": "End User", - "party": "Exclusion" - }, - "provenance_ids": [ - "85f5981a-ba91-44f5-9efa-0bd22e64b7bc", - "ce0480a1-5ef1-4c3e-9861-3743b5610795" - ] - } - ], - "categories": [ - { - "label": "Responsibilities", - "provenance_ids": [] - }, - { - "label": "Amendments", - "provenance_ids": [] - } - ] - }, - "updated_labels": { - "types": [ - { - "label": { - "nature": "Obligation", - "party": "IBM" - } - }, - { - "label": { - "nature": "Disclaimer", - "party": "Buyer" - } - } - ], - "categories": [ - { - "label": "Responsibilities" - }, - { - "label": "Audits" - } - ] - } - } - - responses.add( - responses.POST, - url, - body=json.dumps(feedback), - status=200, - content_type='application/json') - - result = service.add_feedback( - feedback_data, - "wonder woman", - "test commment").get_result() - assert result["feedback_id"] == "lala" - - assert len(responses.calls) == 2 - - @responses.activate - def test_get_feedback(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = "{0}{1}".format(base_url, '/v1/feedback/xxx') - - responses.add( - responses.GET, - url, - body=json.dumps(feedback), - status=200, - content_type='application/json') - - result = service.get_feedback("xxx").get_result() - assert result["feedback_id"] == "lala" - - assert len(responses.calls) == 2 - - @responses.activate - def test_list_feedback(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = "{0}{1}".format(base_url, '/v1/feedback') - - responses.add( - responses.GET, - url, - body=json.dumps({"feedback":[feedback]}), - status=200, - content_type='application/json') - - result = service.list_feedback().get_result() - assert result["feedback"][0]["feedback_id"] == "lala" - - assert len(responses.calls) == 2 - - @responses.activate - def test_delete_feedback(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = "{0}{1}".format(base_url, '/v1/feedback/xxx') - - response = { - "status": 200, - "message": "Successfully deleted the feedback with id - 90ae2cb9-e6c5-43eb-a70f-199959f76019" - } - - responses.add( - responses.DELETE, - url, - body=json.dumps(response), - status=200, - content_type='application/json') - - result = service.delete_feedback("xxx").get_result() - assert result["status"] == 200 - - assert len(responses.calls) == 2 - - @responses.activate - def test_create_batch(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = "{0}{1}".format(base_url, '/v1/batches') - - responses.add( - responses.POST, - url, - body=json.dumps(batch), - status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), - '../../resources/dummy-storage-credentials.json'), 'rb') as input_credentials_file: - with open(os.path.join(os.path.dirname(__file__), - '../../resources/dummy-storage-credentials.json'), 'rb') as output_credentials_file: - result = service.create_batch( - "html_conversion", - input_credentials_file, - "us-south", - 
"compare-comply-integration-test-bucket-input", - output_credentials_file, - "us-south", - "compare-comply-integration-test-bucket-output").get_result() - - assert result["batch_id"] == "xxx" - assert len(responses.calls) == 2 - - @responses.activate - def test_get_batch(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = "{0}{1}".format(base_url, '/v1/batches/xxx') - - responses.add( - responses.GET, - url, - body=json.dumps(batch), - status=200, - content_type='application/json') - - result = service.get_batch("xxx").get_result() - assert result["batch_id"] == "xxx" - - assert len(responses.calls) == 2 - - @responses.activate - def test_list_batches(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = "{0}{1}".format(base_url, '/v1/batches') - - responses.add( - responses.GET, - url, - body=json.dumps({"batches": [batch]}), - status=200, - content_type='application/json') - - result = service.list_batches().get_result() - assert result["batches"][0]["batch_id"] == "xxx" - - assert len(responses.calls) == 2 - - @responses.activate - def test_update_batch(self): - service = ibm_watson.CompareComplyV1( - '2016-10-20', iam_apikey='bogusapikey') - - url = "{0}{1}".format(base_url, '/v1/batches/xxx') - - responses.add( - responses.PUT, - url, - body=json.dumps(batch), - status=200, - content_type='application/json') - - result = service.update_batch("xxx", "rescan").get_result() - assert result["batch_id"] == "xxx" - assert len(responses.calls) == 2 diff --git a/test/unit/test_discovery_v1.py b/test/unit/test_discovery_v1.py deleted file mode 100644 index 0dc20269d..000000000 --- a/test/unit/test_discovery_v1.py +++ /dev/null @@ -1,1284 +0,0 @@ -# coding: utf-8 -import responses -import os -import json -import io -import ibm_watson -from ibm_watson.discovery_v1 import TrainingDataSet, TrainingQuery, TrainingExample - -try: - from urllib.parse import urlparse, urljoin -except ImportError: - from urlparse import urlparse, urljoin - -base_discovery_url = 'https://gateway.watsonplatform.net/discovery/api/v1/' - -platform_url = 'https://gateway.watsonplatform.net' -service_path = '/discovery/api' -base_url = '{0}{1}'.format(platform_url, service_path) - -version = '2016-12-01' -environment_id = 'envid' -collection_id = 'collid' - - -@responses.activate -def test_environments(): - discovery_url = urljoin(base_discovery_url, 'environments') - discovery_response_body = """{ - "environments": [ - { - "environment_id": "string", - "name": "envname", - "description": "", - "created": "2016-11-20T01:03:17.645Z", - "updated": "2016-11-20T01:03:17.645Z", - "status": "status", - "index_capacity": { - "disk_usage": { - "used_bytes": 0, - "total_bytes": 0, - "used": "string", - "total": "string", - "percent_used": 0 - }, - "memory_usage": { - "used_bytes": 0, - "total_bytes": 0, - "used": "string", - "total": "string", - "percent_used": 0 - } - } - } - ] -}""" - - responses.add(responses.GET, discovery_url, - body=discovery_response_body, status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - password='password') - discovery.list_environments() - - url_str = "{0}?version=2016-11-07".format(discovery_url) - assert responses.calls[0].request.url == url_str - - assert responses.calls[0].response.text == discovery_response_body - assert len(responses.calls) == 1 - - -@responses.activate -def test_get_environment(): - discovery_url = 
urljoin(base_discovery_url, 'environments/envid') - responses.add(responses.GET, discovery_url, - body="{\"resulting_key\": true}", status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - password='password') - discovery.get_environment(environment_id='envid') - url_str = "{0}?version=2016-11-07".format(discovery_url) - assert responses.calls[0].request.url == url_str - assert len(responses.calls) == 1 - - -@responses.activate -def test_create_environment(): - - discovery_url = urljoin(base_discovery_url, 'environments') - responses.add(responses.POST, discovery_url, - body="{\"resulting_key\": true}", status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - password='password') - - discovery.create_environment(name="my name", description="my description") - assert len(responses.calls) == 1 - - -@responses.activate -def test_update_environment(): - discovery_url = urljoin(base_discovery_url, 'environments/envid') - responses.add(responses.PUT, discovery_url, - body="{\"resulting_key\": true}", status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - password='password') - discovery.update_environment('envid', name="hello", description="new") - assert len(responses.calls) == 1 - - -@responses.activate -def test_delete_environment(): - discovery_url = urljoin(base_discovery_url, 'environments/envid') - responses.add(responses.DELETE, discovery_url, - body="{\"resulting_key\": true}", status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - password='password') - discovery.delete_environment('envid') - assert len(responses.calls) == 1 - - -@responses.activate -def test_collections(): - discovery_url = urljoin(base_discovery_url, - 'environments/envid/collections') - - responses.add(responses.GET, discovery_url, - body="{\"body\": \"hello\"}", status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - password='password') - discovery.list_collections('envid') - - called_url = urlparse(responses.calls[0].request.url) - test_url = urlparse(discovery_url) - - assert called_url.netloc == test_url.netloc - assert called_url.path == test_url.path - assert len(responses.calls) == 1 - - -@responses.activate -def test_collection(): - discovery_url = urljoin(base_discovery_url, - 'environments/envid/collections/collid') - - discovery_fields = urljoin(base_discovery_url, - 'environments/envid/collections/collid/fields') - config_url = urljoin(base_discovery_url, - 'environments/envid/configurations') - - responses.add(responses.GET, config_url, - body="{\"body\": \"hello\"}", - status=200, - content_type='application/json') - - responses.add(responses.GET, discovery_fields, - body="{\"body\": \"hello\"}", status=200, - content_type='application/json') - - responses.add(responses.GET, discovery_url, - body="{\"body\": \"hello\"}", status=200, - content_type='application/json') - - responses.add(responses.DELETE, discovery_url, - body="{\"body\": \"hello\"}", status=200, - content_type='application/json') - - responses.add(responses.POST, - urljoin(base_discovery_url, - 'environments/envid/collections'), - body="{\"body\": \"create\"}", - status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - 
password='password') - discovery.create_collection(environment_id='envid', - name="name", - description="", - language="", - configuration_id='confid') - - discovery.create_collection(environment_id='envid', - name="name", - language="es", - description="") - - discovery.get_collection('envid', 'collid') - - called_url = urlparse(responses.calls[2].request.url) - test_url = urlparse(discovery_url) - - assert called_url.netloc == test_url.netloc - assert called_url.path == test_url.path - - discovery.delete_collection(environment_id='envid', - collection_id='collid') - discovery.list_collection_fields(environment_id='envid', - collection_id='collid') - assert len(responses.calls) == 5 - -@responses.activate -def test_federated_query(): - discovery_url = urljoin(base_discovery_url, - 'environments/envid/query') - - responses.add(responses.POST, discovery_url, - body="{\"body\": \"hello\"}", status=200, - content_type='application/json') - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - password='password') - discovery.federated_query('envid', 'colls.sha1::9181d244*', collection_ids=['collid1', 'collid2']) - - called_url = urlparse(responses.calls[0].request.url) - test_url = urlparse(discovery_url) - - assert called_url.netloc == test_url.netloc - assert called_url.path == test_url.path - assert len(responses.calls) == 1 - -@responses.activate -def test_federated_query_2(): - discovery_url = urljoin(base_discovery_url, - 'environments/envid/query') - - responses.add(responses.POST, discovery_url, - body="{\"body\": \"hello\"}", status=200, - content_type='application/json') - discovery = ibm_watson.DiscoveryV1('2016-11-07', username='username', password='password') - discovery.federated_query('envid', collection_ids="'collid1', 'collid2'", - filter='colls.sha1::9181d244*', - bias='1', - logging_opt_out=True) - - called_url = urlparse(responses.calls[0].request.url) - test_url = urlparse(discovery_url) - - assert called_url.netloc == test_url.netloc - assert called_url.path == test_url.path - assert len(responses.calls) == 1 - -@responses.activate -def test_federated_query_notices(): - discovery_url = urljoin(base_discovery_url, - 'environments/envid/notices') - - responses.add(responses.GET, discovery_url, - body="{\"body\": \"hello\"}", status=200, - content_type='application/json') - discovery = ibm_watson.DiscoveryV1('2016-11-07', username='username', password='password') - discovery.federated_query_notices('envid', collection_ids=['collid1', 'collid2'], filter='notices.sha1::9181d244*') - - called_url = urlparse(responses.calls[0].request.url) - test_url = urlparse(discovery_url) - - assert called_url.netloc == test_url.netloc - assert called_url.path == test_url.path - assert len(responses.calls) == 1 - -@responses.activate -def test_query(): - discovery_url = urljoin(base_discovery_url, - 'environments/envid/collections/collid/query') - - responses.add(responses.POST, discovery_url, - body="{\"body\": \"hello\"}", status=200, - content_type='application/json') - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - password='password') - discovery.query('envid', 'collid', - filter='extracted_metadata.sha1::9181d244*', - count=1, - passages=True, - passages_fields=['x', 'y'], - logging_opt_out='True', - passages_count=2) - - called_url = urlparse(responses.calls[0].request.url) - test_url = urlparse(discovery_url) - - assert called_url.netloc == test_url.netloc - assert called_url.path == test_url.path - assert len(responses.calls) == 1 - 
-@responses.activate -def test_query_2(): - discovery_url = urljoin(base_discovery_url, - 'environments/envid/collections/collid/query') - - responses.add(responses.POST, discovery_url, - body="{\"body\": \"hello\"}", status=200, - content_type='application/json') - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - password='password') - discovery.query('envid', 'collid', - filter='extracted_metadata.sha1::9181d244*', - count=1, - passages=True, - passages_fields=['x', 'y'], - logging_opt_out='True', - passages_count=2, - bias='1', - collection_ids='1,2') - - called_url = urlparse(responses.calls[0].request.url) - test_url = urlparse(discovery_url) - - assert called_url.netloc == test_url.netloc - assert called_url.path == test_url.path - assert len(responses.calls) == 1 - -@responses.activate -def test_query_relations(): - discovery_url = urljoin( - base_discovery_url, - 'environments/envid/collections/collid/query_relations') - - responses.add( - responses.POST, - discovery_url, - body="{\"body\": \"hello\"}", - status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1( - '2016-11-07', username='username', password='password') - - discovery.query_relations('envid', 'collid', count=10) - called_url = urlparse(responses.calls[0].request.url) - test_url = urlparse(discovery_url) - assert called_url.netloc == test_url.netloc - assert called_url.path == test_url.path - assert len(responses.calls) == 1 - - -@responses.activate -def test_query_entities(): - discovery_url = urljoin( - base_discovery_url, - 'environments/envid/collections/collid/query_entities') - - responses.add( - responses.POST, - discovery_url, - body="{\"body\": \"hello\"}", - status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1( - '2016-11-07', username='username', password='password') - - discovery.query_entities('envid', 'collid', {'count': 10}) - called_url = urlparse(responses.calls[0].request.url) - test_url = urlparse(discovery_url) - assert called_url.netloc == test_url.netloc - assert called_url.path == test_url.path - assert len(responses.calls) == 1 - -@responses.activate -def test_query_notices(): - discovery_url = urljoin( - base_discovery_url, - 'environments/envid/collections/collid/notices') - - responses.add( - responses.GET, - discovery_url, - body="{\"body\": \"hello\"}", - status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1( - '2016-11-07', username='username', password='password') - - discovery.query_notices('envid', 'collid', filter='notices.sha1::*') - called_url = urlparse(responses.calls[0].request.url) - test_url = urlparse(discovery_url) - assert called_url.netloc == test_url.netloc - assert called_url.path == test_url.path - assert len(responses.calls) == 1 - -@responses.activate -def test_configs(): - discovery_url = urljoin(base_discovery_url, - 'environments/envid/configurations') - discovery_config_id = urljoin(base_discovery_url, - 'environments/envid/configurations/confid') - - results = {"configurations": - [{"name": "Default Configuration", - "configuration_id": "confid"}]} - - responses.add(responses.GET, discovery_url, - body=json.dumps(results), - status=200, - content_type='application/json') - - responses.add(responses.GET, discovery_config_id, - body=json.dumps(results['configurations'][0]), - status=200, - content_type='application/json') - responses.add(responses.POST, discovery_url, - body=json.dumps(results['configurations'][0]), - status=200, - 
content_type='application/json') - responses.add(responses.PUT, discovery_config_id, - body=json.dumps(results['configurations'][0]), - status=200, - content_type='application/json') - responses.add(responses.DELETE, discovery_config_id, - body=json.dumps({'deleted': 'bogus -- ok'}), - status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - password='password') - discovery.list_configurations(environment_id='envid') - - discovery.get_configuration(environment_id='envid', - configuration_id='confid') - - assert len(responses.calls) == 2 - - discovery.create_configuration(environment_id='envid', - name='my name') - discovery.create_configuration(environment_id='envid', - name='my name', - source={'type': 'salesforce', 'credential_id': 'xxx'}) - discovery.update_configuration(environment_id='envid', - configuration_id='confid', - name='my new name') - discovery.update_configuration(environment_id='envid', - configuration_id='confid', - name='my new name', - source={'type': 'salesforce', 'credential_id': 'xxx'}) - discovery.delete_configuration(environment_id='envid', - configuration_id='confid') - - assert len(responses.calls) == 7 - - -@responses.activate -def test_document(): - discovery_url = urljoin(base_discovery_url, - 'environments/envid/preview') - config_url = urljoin(base_discovery_url, - 'environments/envid/configurations') - responses.add(responses.POST, discovery_url, - body="{\"configurations\": []}", - status=200, - content_type='application/json') - responses.add(responses.GET, config_url, - body=json.dumps({"configurations": - [{"name": "Default Configuration", - "configuration_id": "confid"}]}), - status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1('2016-11-07', - username='username', - password='password') - html_path = os.path.join(os.getcwd(), 'resources', 'simple.html') - with open(html_path) as fileinfo: - conf_id = discovery.test_configuration_in_environment(environment_id='envid', - configuration_id='bogus', - file=fileinfo) - assert conf_id is not None - conf_id = discovery.test_configuration_in_environment(environment_id='envid', - file=fileinfo) - assert conf_id is not None - - assert len(responses.calls) == 2 - - add_doc_url = urljoin(base_discovery_url, - 'environments/envid/collections/collid/documents') - - doc_id_path = 'environments/envid/collections/collid/documents/docid' - - update_doc_url = urljoin(base_discovery_url, doc_id_path) - del_doc_url = urljoin(base_discovery_url, - doc_id_path) - responses.add(responses.POST, add_doc_url, - body="{\"body\": []}", - status=200, - content_type='application/json') - - doc_status = { - "document_id": "45556e23-f2b1-449d-8f27-489b514000ff", - "configuration_id": "2e079259-7dd2-40a9-998f-3e716f5a7b88", - "created" : "2016-06-16T10:56:54.957Z", - "updated" : "2017-05-16T13:56:54.957Z", - "status": "available", - "status_description": "Document is successfully ingested and indexed with no warnings", - "notices": [] - } - - responses.add(responses.GET, del_doc_url, - body=json.dumps(doc_status), - status=200, - content_type='application/json') - - responses.add(responses.POST, update_doc_url, - body="{\"body\": []}", - status=200, - content_type='application/json') - - responses.add(responses.DELETE, del_doc_url, - body="{\"body\": []}", - status=200, - content_type='application/json') - - html_path = os.path.join(os.getcwd(), 'resources', 'simple.html') - with open(html_path) as fileinfo: - conf_id = 
discovery.add_document(environment_id='envid', - collection_id='collid', - file=fileinfo) - assert conf_id is not None - - assert len(responses.calls) == 3 - - discovery.get_document_status(environment_id='envid', - collection_id='collid', - document_id='docid') - - assert len(responses.calls) == 4 - - discovery.update_document(environment_id='envid', - collection_id='collid', - document_id='docid') - - assert len(responses.calls) == 5 - - discovery.update_document(environment_id='envid', - collection_id='collid', - document_id='docid') - - assert len(responses.calls) == 6 - - discovery.delete_document(environment_id='envid', - collection_id='collid', - document_id='docid') - - assert len(responses.calls) == 7 - - conf_id = discovery.add_document(environment_id='envid', - collection_id='collid', - file=io.StringIO(u'my string of file'), - filename='file.txt') - - assert len(responses.calls) == 8 - - conf_id = discovery.add_document(environment_id='envid', - collection_id='collid', - file=io.StringIO(u'
my string of file
'), - filename='file.html', - file_content_type='application/html') - - assert len(responses.calls) == 9 - - conf_id = discovery.add_document(environment_id='envid', - collection_id='collid', - file=io.StringIO(u'
my string of file
'), - filename='file.html', - file_content_type='application/html', - metadata=io.StringIO(u'{"stuff": "woot!"}')) - - assert len(responses.calls) == 10 - - -@responses.activate -def test_delete_all_training_data(): - training_endpoint = '/v1/environments/{0}/collections/{1}/training_data' - endpoint = training_endpoint.format(environment_id, collection_id) - url = '{0}{1}'.format(base_url, endpoint) - responses.add(responses.DELETE, url, status=204) - - service = ibm_watson.DiscoveryV1(version, username='username', password='password') - response = service.delete_all_training_data(environment_id=environment_id, - collection_id=collection_id).get_result() - - assert response is None - - -@responses.activate -def test_list_training_data(): - training_endpoint = '/v1/environments/{0}/collections/{1}/training_data' - endpoint = training_endpoint.format(environment_id, collection_id) - url = '{0}{1}'.format(base_url, endpoint) - mock_response = { - "environment_id": "string", - "collection_id": "string", - "queries": [ - { - "query_id": "string", - "natural_language_query": "string", - "filter": "string", - "examples": [ - { - "document_id": "string", - "cross_reference": "string", - "relevance": 0 - } - ] - } - ] - } - responses.add(responses.GET, - url, - body=json.dumps(mock_response), - status=200, - content_type='application/json') - - service = ibm_watson.DiscoveryV1(version, - username='username', - password='password') - response = service.list_training_data(environment_id=environment_id, - collection_id=collection_id).get_result() - - assert response == mock_response - # Verify that response can be converted to a TrainingDataSet - TrainingDataSet._from_dict(response) - - -@responses.activate -def test_add_training_data(): - training_endpoint = '/v1/environments/{0}/collections/{1}/training_data' - endpoint = training_endpoint.format(environment_id, collection_id) - url = '{0}{1}'.format(base_url, endpoint) - natural_language_query = "why is the sky blue" - filter = "text:meteorology" - examples = [ - { - "document_id": "54f95ac0-3e4f-4756-bea6-7a67b2713c81", - "relevance": 1 - }, - { - "document_id": "01bcca32-7300-4c9f-8d32-33ed7ea643da", - "cross_reference": "my_id_field:1463", - "relevance": 5 - } - ] - mock_response = { - "query_id": "string", - "natural_language_query": "string", - "filter": "string", - "examples": [ - { - "document_id": "string", - "cross_reference": "string", - "relevance": 0 - } - ] - } - responses.add(responses.POST, - url, - body=json.dumps(mock_response), - status=200, - content_type='application/json') - - service = ibm_watson.DiscoveryV1(version, - username='username', - password='password') - response = service.add_training_data( - environment_id=environment_id, - collection_id=collection_id, - natural_language_query=natural_language_query, - filter=filter, - examples=examples).get_result() - - assert response == mock_response - # Verify that response can be converted to a TrainingQuery - TrainingQuery._from_dict(response) - - -@responses.activate -def test_delete_training_data(): - training_endpoint = '/v1/environments/{0}/collections/{1}/training_data/{2}' - query_id = 'queryid' - endpoint = training_endpoint.format( - environment_id, collection_id, query_id) - url = '{0}{1}'.format(base_url, endpoint) - responses.add(responses.DELETE, url, status=204) - - service = ibm_watson.DiscoveryV1(version, - username='username', - password='password') - response = service.delete_training_data(environment_id=environment_id, - collection_id=collection_id, - 
query_id=query_id).get_result() - - assert response is None - - -@responses.activate -def test_get_training_data(): - training_endpoint = '/v1/environments/{0}/collections/{1}/training_data/{2}' - query_id = 'queryid' - endpoint = training_endpoint.format( - environment_id, collection_id, query_id) - url = '{0}{1}'.format(base_url, endpoint) - mock_response = { - "query_id": "string", - "natural_language_query": "string", - "filter": "string", - "examples": [ - { - "document_id": "string", - "cross_reference": "string", - "relevance": 0 - } - ] - } - responses.add(responses.GET, - url, - body=json.dumps(mock_response), - status=200, - content_type='application/json') - - service = ibm_watson.DiscoveryV1(version, username='username', password='password') - response = service.get_training_data(environment_id=environment_id, - collection_id=collection_id, - query_id=query_id).get_result() - - assert response == mock_response - # Verify that response can be converted to a TrainingQuery - TrainingQuery._from_dict(response) - - -@responses.activate -def test_create_training_example(): - examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \ - '/{2}/examples' - query_id = 'queryid' - endpoint = examples_endpoint.format( - environment_id, collection_id, query_id) - url = '{0}{1}'.format(base_url, endpoint) - document_id = "string" - relevance = 0 - cross_reference = "string" - mock_response = { - "document_id": "string", - "cross_reference": "string", - "relevance": 0 - } - responses.add(responses.POST, - url, - body=json.dumps(mock_response), - status=201, - content_type='application/json') - - service = ibm_watson.DiscoveryV1(version, - username='username', - password='password') - response = service.create_training_example( - environment_id=environment_id, - collection_id=collection_id, - query_id=query_id, - document_id=document_id, - relevance=relevance, - cross_reference=cross_reference).get_result() - - assert response == mock_response - # Verify that response can be converted to a TrainingExample - TrainingExample._from_dict(response) - - -@responses.activate -def test_delete_training_example(): - examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \ - '/{2}/examples/{3}' - query_id = 'queryid' - example_id = 'exampleid' - endpoint = examples_endpoint.format(environment_id, - collection_id, - query_id, - example_id) - url = '{0}{1}'.format(base_url, endpoint) - responses.add(responses.DELETE, url, status=204) - - service = ibm_watson.DiscoveryV1(version, username='username', password='password') - response = service.delete_training_example( - environment_id=environment_id, - collection_id=collection_id, - query_id=query_id, - example_id=example_id).get_result() - - assert response is None - - -@responses.activate -def test_get_training_example(): - examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \ - '/{2}/examples/{3}' - query_id = 'queryid' - example_id = 'exampleid' - endpoint = examples_endpoint.format(environment_id, - collection_id, - query_id, - example_id) - url = '{0}{1}'.format(base_url, endpoint) - mock_response = { - "document_id": "string", - "cross_reference": "string", - "relevance": 0 - } - responses.add(responses.GET, - url, - body=json.dumps(mock_response), - status=200, - content_type='application/json') - - service = ibm_watson.DiscoveryV1(version, username='username', password='password') - response = service.get_training_example( - environment_id=environment_id, - collection_id=collection_id, - 
query_id=query_id, - example_id=example_id).get_result() - - assert response == mock_response - # Verify that response can be converted to a TrainingExample - TrainingExample._from_dict(response) - - -@responses.activate -def test_update_training_example(): - examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \ - '/{2}/examples/{3}' - query_id = 'queryid' - example_id = 'exampleid' - endpoint = examples_endpoint.format(environment_id, - collection_id, - query_id, - example_id) - url = '{0}{1}'.format(base_url, endpoint) - relevance = 0 - cross_reference = "string" - mock_response = { - "document_id": "string", - "cross_reference": "string", - "relevance": 0 - } - responses.add(responses.PUT, - url, - body=json.dumps(mock_response), - status=200, - content_type='application/json') - - service = ibm_watson.DiscoveryV1(version, - username='username', - password='password') - response = service.update_training_example( - environment_id=environment_id, - collection_id=collection_id, - query_id=query_id, - example_id=example_id, - relevance=relevance, - cross_reference=cross_reference).get_result() - - assert response == mock_response - # Verify that response can be converted to a TrainingExample - TrainingExample._from_dict(response) - -@responses.activate -def test_expansions(): - url = 'https://gateway.watsonplatform.net/discovery/api/v1/environments/envid/collections/colid/expansions' - responses.add( - responses.GET, - url, - body='{"expansions": "results"}', - status=200, - content_type='application_json') - responses.add( - responses.DELETE, - url, - body='{"description": "success" }', - status=200, - content_type='application_json') - responses.add( - responses.POST, - url, - body='{"expansions": "success" }', - status=200, - content_type='application_json') - - discovery = ibm_watson.DiscoveryV1('2017-11-07', username="username", password="password") - - discovery.list_expansions('envid', 'colid') - assert responses.calls[0].response.json() == {"expansions": "results"} - - discovery.create_expansions('envid', 'colid', [{"input_terms": "dumb", "expanded_terms": "dumb2"}]) - assert responses.calls[1].response.json() == {"expansions": "success"} - - discovery.delete_expansions('envid', 'colid') - assert responses.calls[2].response.json() == {"description": "success"} - - assert len(responses.calls) == 3 - -@responses.activate -def test_delete_user_data(): - url = 'https://gateway.watsonplatform.net/discovery/api/v1/user_data' - responses.add( - responses.DELETE, - url, - body='{"description": "success" }', - status=204, - content_type='application_json') - - discovery = ibm_watson.DiscoveryV1('2017-11-07', username="username", password="password") - - response = discovery.delete_user_data('id').get_result() - assert response is None - assert len(responses.calls) == 1 - -@responses.activate -def test_credentials(): - discovery_credentials_url = urljoin(base_discovery_url, 'environments/envid/credentials') - - results = {'credential_id': 'e68305ce-29f3-48ea-b829-06653ca0fdef', - 'source_type': 'salesforce', - 'credential_details': { - 'url': 'https://login.salesforce.com', - 'credential_type': 'username_password', - 'username':'user@email.com'} - } - - iam_url = "https://iam.cloud.ibm.com/identity/token" - iam_token_response = """{ - "access_token": "oAeisG8yqPY7sFR_x66Z15", - "token_type": "Bearer", - "expires_in": 3600, - "expiration": 1524167011, - "refresh_token": "jy4gl91BQ" - }""" - responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200) 
- responses.add(responses.GET, "{0}/{1}?version=2016-11-07".format(discovery_credentials_url, 'credential_id'), - body=json.dumps(results), - status=200, - content_type='application/json') - responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_credentials_url), - body=json.dumps([results]), - status=200, - content_type='application/json') - - responses.add(responses.POST, "{0}?version=2016-11-07".format(discovery_credentials_url), - body=json.dumps(results), - status=200, - content_type='application/json') - results['source_type'] = 'ibm' - responses.add(responses.PUT, "{0}/{1}?version=2016-11-07".format(discovery_credentials_url, 'credential_id'), - body=json.dumps(results), - status=200, - content_type='application/json') - responses.add(responses.DELETE, "{0}/{1}?version=2016-11-07".format(discovery_credentials_url, 'credential_id'), - body=json.dumps({'deleted': 'bogus -- ok'}), - status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1('2016-11-07', iam_apikey='iam_apikey') - discovery.create_credentials('envid', 'salesforce', { - 'url': 'https://login.salesforce.com', - 'credential_type': 'username_password', - 'username':'user@email.com' - }) - - discovery.get_credentials('envid', 'credential_id') - - discovery.update_credentials(environment_id='envid', - credential_id='credential_id', - source_type='salesforce', - credential_details=results['credential_details']) - discovery.list_credentials('envid') - discovery.delete_credentials(environment_id='envid', credential_id='credential_id') - assert len(responses.calls) == 10 - -@responses.activate -def test_events_and_feedback(): - discovery_event_url = urljoin(base_discovery_url, 'events') - discovery_metrics_event_rate_url = urljoin(base_discovery_url, 'metrics/event_rate') - discovery_metrics_query_url = urljoin(base_discovery_url, 'metrics/number_of_queries') - discovery_metrics_query_event_url = urljoin(base_discovery_url, 'metrics/number_of_queries_with_event') - discovery_metrics_query_no_results_url = urljoin(base_discovery_url, 'metrics/number_of_queries_with_no_search_results') - discovery_metrics_query_token_event_url = urljoin(base_discovery_url, 'metrics/top_query_tokens_with_event_rate') - discovery_query_log_url = urljoin(base_discovery_url, 'logs') - - event_data = { - "environment_id": "xxx", - "session_token": "yyy", - "client_timestamp": "2018-08-14T14:39:59.268Z", - "display_rank": 0, - "collection_id": "abc", - "document_id": "xyz", - "query_id": "cde" - } - - create_event_response = { - "type": "click", - "data": event_data - } - - metric_response = { - "aggregations": [ - { - "interval": "1d", - "event_type": "click", - "results": [ - { - "key_as_string": "2018-08-14T14:39:59.309Z", - "key": 1533513600000, - "matching_results": 2, - "event_rate": 0.0 - } - ] - } - ] - } - - metric_token_response = { - "aggregations": [ - { - "event_type": "click", - "results": [ - { - "key": "content", - "matching_results": 5, - "event_rate": 0.6 - }, - { - "key": "first", - "matching_results": 5, - "event_rate": 0.6 - }, - { - "key": "of", - "matching_results": 5, - "event_rate": 0.6 - } - ] - } - ] - } - - log_query_response = { - "matching_results": 20, - "results": [ - { - "customer_id": "", - "environment_id": "xxx", - "natural_language_query": "The content of the first chapter", - "query_id": "1ICUdh3Pab", - "document_results": { - "count": 1, - "results": [ - { - "collection_id": "b67a82f3-6507-4c25-9757-3485ff4f2a32", - "score": 0.025773458, - "position": 10, - "document_id": 
"af0be20e-e130-4712-9a2e-37d9c8b9c52f" - } - ] - }, - "event_type": "query", - "session_token": "1_nbEfQtKVcg9qx3t41ICUdh3Pab", - "created_timestamp": "2018-08-14T18:20:30.460Z" - } - ] - } - - iam_url = "https://iam.cloud.ibm.com/identity/token" - iam_token_response = """{ - "access_token": "oAeisG8yqPY7sFR_x66Z15", - "token_type": "Bearer", - "expires_in": 3600, - "expiration": 1524167011, - "refresh_token": "jy4gl91BQ" - }""" - responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200) - - responses.add(responses.POST, "{0}?version=2016-11-07".format(discovery_event_url), - body=json.dumps(create_event_response), - status=200, - content_type='application/json') - - responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_metrics_event_rate_url), - body=json.dumps(metric_response), - status=200, - content_type='application/json') - - responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_metrics_query_url), - body=json.dumps(metric_response), - status=200, - content_type='application/json') - - responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_metrics_query_event_url), - body=json.dumps(metric_response), - status=200, - content_type='application/json') - responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_metrics_query_no_results_url), - body=json.dumps(metric_response), - status=200, - content_type='application/json') - responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_metrics_query_token_event_url), - body=json.dumps(metric_token_response), - status=200, - content_type='application/json') - responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_query_log_url), - body=json.dumps(log_query_response), - status=200, - content_type='application/json') - - - discovery = ibm_watson.DiscoveryV1('2016-11-07', iam_apikey='iam_apikey') - - discovery.create_event('click', event_data) - assert responses.calls[1].response.json()["data"] == event_data - - discovery.get_metrics_event_rate('2018-08-13T14:39:59.309Z', - '2018-08-14T14:39:59.309Z', - 'document') - assert responses.calls[3].response.json() == metric_response - - discovery.get_metrics_query('2018-08-13T14:39:59.309Z', - '2018-08-14T14:39:59.309Z', - 'document') - assert responses.calls[5].response.json() == metric_response - - discovery.get_metrics_query_event('2018-08-13T14:39:59.309Z', - '2018-08-14T14:39:59.309Z', - 'document') - assert responses.calls[7].response.json() == metric_response - - discovery.get_metrics_query_no_results('2018-08-13T14:39:59.309Z', - '2018-08-14T14:39:59.309Z', - 'document') - assert responses.calls[9].response.json() == metric_response - - discovery.get_metrics_query_token_event(2) - assert responses.calls[11].response.json() == metric_token_response - - discovery.query_log() - assert responses.calls[13].response.json() == log_query_response - - assert len(responses.calls) == 14 - -@responses.activate -def test_tokenization_dictionary(): - url = 'https://gateway.watsonplatform.net/discovery/api/v1/environments/envid/collections/colid/word_lists/tokenization_dictionary?version=2017-11-07' - responses.add( - responses.POST, - url, - body='{"status": "pending"}', - status=200, - content_type='application_json') - responses.add( - responses.DELETE, - url, - body='{"status": "pending"}', - status=200) - responses.add( - responses.GET, - url, - body='{"status": "pending", "type":"tokenization_dictionary"}', - status=200, - content_type='application_json') - - discovery = 
ibm_watson.DiscoveryV1('2017-11-07', username="username", password="password") - - tokenization_rules = [ - { - 'text': 'token', - 'tokens': ['token 1', 'token 2'], - 'readings': ['reading 1', 'reading 2'], - 'part_of_speech': 'noun', - } - ] - discovery.create_tokenization_dictionary('envid', 'colid', tokenization_rules) - assert responses.calls[0].response.json() == {"status": "pending"} - - discovery.get_tokenization_dictionary_status('envid', 'colid') - assert responses.calls[1].response.json() == {"status": "pending", "type":"tokenization_dictionary"} - - discovery.delete_tokenization_dictionary('envid', 'colid') - assert responses.calls[2].response.status_code == 200 - - assert len(responses.calls) == 3 - -@responses.activate -def test_stopword_operations(): - url = 'https://gateway.watsonplatform.net/discovery/api/v1/environments/envid/collections/colid/word_lists/stopwords?version=2017-11-07' - responses.add( - responses.POST, - url, - body='{"status": "pending", "type": "stopwords"}', - status=200, - content_type='application_json') - responses.add( - responses.DELETE, - url, - status=200) - responses.add( - responses.GET, - url, - body='{"status": "ready", "type": "stopwords"}', - status=200, - content_type='application_json') - - discovery = ibm_watson.DiscoveryV1('2017-11-07', username="username", password="password") - - stopwords_file_path = os.path.join(os.getcwd(), 'resources', 'stopwords.txt') - with open(stopwords_file_path) as file: - discovery.create_stopword_list('envid', 'colid', file) - assert responses.calls[0].response.json() == {"status": "pending", "type": "stopwords"} - - discovery.get_stopword_list_status('envid', 'colid') - assert responses.calls[1].response.json() == {"status": "ready", "type": "stopwords"} - - discovery.delete_stopword_list('envid', 'colid') - assert responses.calls[2].response.status_code == 200 - - assert len(responses.calls) == 3 - -@responses.activate -def test_gateway_configuration(): - discovery_gateway_url = urljoin(base_discovery_url, 'environments/envid/gateways') - - gateway_details = { - "status": "idle", - "token_id": "9GnaCreixek_prod_ng", - "token": "4FByv9Mmd79x6c", - "name": "test-gateway-configuration-python", - "gateway_id": "gateway_id" - } - - iam_url = "https://iam.cloud.ibm.com/identity/token" - iam_token_response = """{ - "access_token": "oAeisG8yqPY7sFR_x66Z15", - "token_type": "Bearer", - "expires_in": 3600, - "expiration": 1524167011, - "refresh_token": "jy4gl91BQ" - }""" - responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200) - responses.add(responses.GET, "{0}/{1}?version=2016-11-07".format(discovery_gateway_url, 'gateway_id'), - body=json.dumps(gateway_details), - status=200, - content_type='application/json') - responses.add(responses.POST, "{0}?version=2016-11-07".format(discovery_gateway_url), - body=json.dumps(gateway_details), - status=200, - content_type='application/json') - responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_gateway_url), - body=json.dumps({'gateways': [gateway_details]}), - status=200, - content_type='application/json') - responses.add(responses.DELETE, "{0}/{1}?version=2016-11-07".format(discovery_gateway_url, 'gateway_id'), - body=json.dumps({'gateway_id': 'gateway_id', 'status': 'deleted'}), - status=200, - content_type='application/json') - - discovery = ibm_watson.DiscoveryV1('2016-11-07', iam_apikey='iam_apikey') - - discovery.create_gateway('envid', 'gateway_id') - discovery.list_gateways('envid') - discovery.get_gateway('envid', 
'gateway_id') - discovery.delete_gateway(environment_id='envid', gateway_id='gateway_id') - assert len(responses.calls) == 8 diff --git a/test/unit/test_discovery_v2.py b/test/unit/test_discovery_v2.py new file mode 100644 index 000000000..10401163d --- /dev/null +++ b/test/unit/test_discovery_v2.py @@ -0,0 +1,9960 @@ +# -*- coding: utf-8 -*- +# (C) Copyright IBM Corp. 2019, 2024. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Unit Tests for DiscoveryV2 +""" + +from datetime import datetime, timezone +from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator +from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime +import inspect +import io +import json +import pytest +import re +import requests +import responses +import tempfile +import urllib +from ibm_watson.discovery_v2 import * + +version = 'testString' + +_service = DiscoveryV2( + authenticator=NoAuthAuthenticator(), + version=version, +) + +_base_url = 'https://api.us-south.discovery.watson.cloud.ibm.com' +_service.set_service_url(_base_url) + + +def preprocess_url(operation_path: str): + """ + Returns the request url associated with the specified operation path. + This will be base_url concatenated with a quoted version of operation_path. + The returned request URL is used to register the mock response so it needs + to match the request URL that is formed by the requests library. + """ + + # Form the request URL from the base URL and operation path. + request_url = _base_url + operation_path + + # If the request url does NOT end with a /, then just return it as-is. + # Otherwise, return a regular expression that matches one or more trailing /. 
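+    # For example, '/v2/projects' is returned unchanged as
+    # 'https://api.us-south.discovery.watson.cloud.ibm.com/v2/projects', while a path that
+    # ends in '/' (illustrative case) is returned as re.compile(<url without trailing '/'> + '/+'),
+    # so the registered mock also matches request URLs that carry one or more trailing slashes.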
+ if not request_url.endswith('/'): + return request_url + return re.compile(request_url.rstrip('/') + '/+') + + +############################################################################## +# Start of Service: Projects +############################################################################## +# region + + +class TestListProjects: + """ + Test Class for list_projects + """ + + @responses.activate + def test_list_projects_all_params(self): + """ + list_projects() + """ + # Set up mock + url = preprocess_url('/v2/projects') + mock_response = '{"projects": [{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_projects() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_projects_all_params_with_retries(self): + # Enable retries and run test_list_projects_all_params. + _service.enable_retries() + self.test_list_projects_all_params() + + # Disable retries and run test_list_projects_all_params. + _service.disable_retries() + self.test_list_projects_all_params() + + @responses.activate + def test_list_projects_value_error(self): + """ + test_list_projects_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects') + mock_response = '{"projects": [{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_projects(**req_copy) + + def test_list_projects_value_error_with_retries(self): + # Enable retries and run test_list_projects_value_error. + _service.enable_retries() + self.test_list_projects_value_error() + + # Disable retries and run test_list_projects_value_error. 
+ _service.disable_retries() + self.test_list_projects_value_error() + + +class TestCreateProject: + """ + Test Class for create_project + """ + + @responses.activate + def test_create_project_all_params(self): + """ + create_project() + """ + # Set up mock + url = preprocess_url('/v2/projects') + mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a DefaultQueryParamsPassages model + default_query_params_passages_model = {} + default_query_params_passages_model['enabled'] = True + default_query_params_passages_model['count'] = 38 + default_query_params_passages_model['fields'] = ['testString'] + default_query_params_passages_model['characters'] = 38 + default_query_params_passages_model['per_document'] = True + default_query_params_passages_model['max_per_document'] = 38 + + # Construct a dict representation of a DefaultQueryParamsTableResults model + default_query_params_table_results_model = {} + default_query_params_table_results_model['enabled'] = True + default_query_params_table_results_model['count'] = 38 + default_query_params_table_results_model['per_document'] = 0 + + # Construct a dict representation of a DefaultQueryParamsSuggestedRefinements model + default_query_params_suggested_refinements_model = {} + default_query_params_suggested_refinements_model['enabled'] = True + default_query_params_suggested_refinements_model['count'] = 38 + + # Construct a dict representation of a DefaultQueryParams model + default_query_params_model = {} + default_query_params_model['collection_ids'] = ['testString'] + default_query_params_model['passages'] = default_query_params_passages_model + default_query_params_model['table_results'] = default_query_params_table_results_model + default_query_params_model['aggregation'] = 'testString' + default_query_params_model['suggested_refinements'] = default_query_params_suggested_refinements_model + default_query_params_model['spelling_suggestions'] = True + default_query_params_model['highlight'] = True + default_query_params_model['count'] = 38 + default_query_params_model['sort'] = 'testString' + default_query_params_model['return'] = ['testString'] + + # Set up parameter values + name = 'testString' + type = 'intelligent_document_processing' + default_query_parameters = default_query_params_model + + # Invoke method + response = _service.create_project( + name, + type, + default_query_parameters=default_query_parameters, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = 
json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['type'] == 'intelligent_document_processing' + assert req_body['default_query_parameters'] == default_query_params_model + + def test_create_project_all_params_with_retries(self): + # Enable retries and run test_create_project_all_params. + _service.enable_retries() + self.test_create_project_all_params() + + # Disable retries and run test_create_project_all_params. + _service.disable_retries() + self.test_create_project_all_params() + + @responses.activate + def test_create_project_value_error(self): + """ + test_create_project_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects') + mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a DefaultQueryParamsPassages model + default_query_params_passages_model = {} + default_query_params_passages_model['enabled'] = True + default_query_params_passages_model['count'] = 38 + default_query_params_passages_model['fields'] = ['testString'] + default_query_params_passages_model['characters'] = 38 + default_query_params_passages_model['per_document'] = True + default_query_params_passages_model['max_per_document'] = 38 + + # Construct a dict representation of a DefaultQueryParamsTableResults model + default_query_params_table_results_model = {} + default_query_params_table_results_model['enabled'] = True + default_query_params_table_results_model['count'] = 38 + default_query_params_table_results_model['per_document'] = 0 + + # Construct a dict representation of a DefaultQueryParamsSuggestedRefinements model + default_query_params_suggested_refinements_model = {} + default_query_params_suggested_refinements_model['enabled'] = True + default_query_params_suggested_refinements_model['count'] = 38 + + # Construct a dict representation of a DefaultQueryParams model + default_query_params_model = {} + default_query_params_model['collection_ids'] = ['testString'] + default_query_params_model['passages'] = default_query_params_passages_model + default_query_params_model['table_results'] = default_query_params_table_results_model + default_query_params_model['aggregation'] = 'testString' + default_query_params_model['suggested_refinements'] = default_query_params_suggested_refinements_model + default_query_params_model['spelling_suggestions'] = True + default_query_params_model['highlight'] = True + default_query_params_model['count'] = 38 + default_query_params_model['sort'] = 'testString' + default_query_params_model['return'] = ['testString'] + + # Set up 
parameter values + name = 'testString' + type = 'intelligent_document_processing' + default_query_parameters = default_query_params_model + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "name": name, + "type": type, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_project(**req_copy) + + def test_create_project_value_error_with_retries(self): + # Enable retries and run test_create_project_value_error. + _service.enable_retries() + self.test_create_project_value_error() + + # Disable retries and run test_create_project_value_error. + _service.disable_retries() + self.test_create_project_value_error() + + +class TestGetProject: + """ + Test Class for get_project + """ + + @responses.activate + def test_get_project_all_params(self): + """ + get_project() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString') + mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.get_project( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_project_all_params_with_retries(self): + # Enable retries and run test_get_project_all_params. + _service.enable_retries() + self.test_get_project_all_params() + + # Disable retries and run test_get_project_all_params. 
+ _service.disable_retries() + self.test_get_project_all_params() + + @responses.activate + def test_get_project_value_error(self): + """ + test_get_project_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString') + mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_project(**req_copy) + + def test_get_project_value_error_with_retries(self): + # Enable retries and run test_get_project_value_error. + _service.enable_retries() + self.test_get_project_value_error() + + # Disable retries and run test_get_project_value_error. 
+ _service.disable_retries() + self.test_get_project_value_error() + + +class TestUpdateProject: + """ + Test Class for update_project + """ + + @responses.activate + def test_update_project_all_params(self): + """ + update_project() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString') + mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + name = 'testString' + + # Invoke method + response = _service.update_project( + project_id, + name=name, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + + def test_update_project_all_params_with_retries(self): + # Enable retries and run test_update_project_all_params. + _service.enable_retries() + self.test_update_project_all_params() + + # Disable retries and run test_update_project_all_params. 
+ _service.disable_retries() + self.test_update_project_all_params() + + @responses.activate + def test_update_project_required_params(self): + """ + test_update_project_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString') + mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.update_project( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_update_project_required_params_with_retries(self): + # Enable retries and run test_update_project_required_params. + _service.enable_retries() + self.test_update_project_required_params() + + # Disable retries and run test_update_project_required_params. + _service.disable_retries() + self.test_update_project_required_params() + + @responses.activate + def test_update_project_value_error(self): + """ + test_update_project_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString') + mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_project(**req_copy) + + def test_update_project_value_error_with_retries(self): + # Enable retries and run test_update_project_value_error. 
+ _service.enable_retries() + self.test_update_project_value_error() + + # Disable retries and run test_update_project_value_error. + _service.disable_retries() + self.test_update_project_value_error() + + +class TestDeleteProject: + """ + Test Class for delete_project + """ + + @responses.activate + def test_delete_project_all_params(self): + """ + delete_project() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.delete_project( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_project_all_params_with_retries(self): + # Enable retries and run test_delete_project_all_params. + _service.enable_retries() + self.test_delete_project_all_params() + + # Disable retries and run test_delete_project_all_params. + _service.disable_retries() + self.test_delete_project_all_params() + + @responses.activate + def test_delete_project_value_error(self): + """ + test_delete_project_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_project(**req_copy) + + def test_delete_project_value_error_with_retries(self): + # Enable retries and run test_delete_project_value_error. + _service.enable_retries() + self.test_delete_project_value_error() + + # Disable retries and run test_delete_project_value_error. + _service.disable_retries() + self.test_delete_project_value_error() + + +class TestListFields: + """ + Test Class for list_fields + """ + + @responses.activate + def test_list_fields_all_params(self): + """ + list_fields() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/fields') + mock_response = '{"fields": [{"field": "field", "type": "nested", "collection_id": "collection_id"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_ids = ['testString'] + + # Invoke method + response = _service.list_fields( + project_id, + collection_ids=collection_ids, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'collection_ids={}'.format(','.join(collection_ids)) in query_string + + def test_list_fields_all_params_with_retries(self): + # Enable retries and run test_list_fields_all_params. + _service.enable_retries() + self.test_list_fields_all_params() + + # Disable retries and run test_list_fields_all_params. 
+ _service.disable_retries() + self.test_list_fields_all_params() + + @responses.activate + def test_list_fields_required_params(self): + """ + test_list_fields_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/fields') + mock_response = '{"fields": [{"field": "field", "type": "nested", "collection_id": "collection_id"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.list_fields( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_fields_required_params_with_retries(self): + # Enable retries and run test_list_fields_required_params. + _service.enable_retries() + self.test_list_fields_required_params() + + # Disable retries and run test_list_fields_required_params. + _service.disable_retries() + self.test_list_fields_required_params() + + @responses.activate + def test_list_fields_value_error(self): + """ + test_list_fields_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/fields') + mock_response = '{"fields": [{"field": "field", "type": "nested", "collection_id": "collection_id"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_fields(**req_copy) + + def test_list_fields_value_error_with_retries(self): + # Enable retries and run test_list_fields_value_error. + _service.enable_retries() + self.test_list_fields_value_error() + + # Disable retries and run test_list_fields_value_error. + _service.disable_retries() + self.test_list_fields_value_error() + + +# endregion +############################################################################## +# End of Service: Projects +############################################################################## + +############################################################################## +# Start of Service: Collections +############################################################################## +# region + + +class TestListCollections: + """ + Test Class for list_collections + """ + + @responses.activate + def test_list_collections_all_params(self): + """ + list_collections() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections') + mock_response = '{"collections": [{"collection_id": "collection_id", "name": "name"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.list_collections( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_collections_all_params_with_retries(self): + # Enable retries and run test_list_collections_all_params. + _service.enable_retries() + self.test_list_collections_all_params() + + # Disable retries and run test_list_collections_all_params. 
+ _service.disable_retries() + self.test_list_collections_all_params() + + @responses.activate + def test_list_collections_value_error(self): + """ + test_list_collections_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections') + mock_response = '{"collections": [{"collection_id": "collection_id", "name": "name"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_collections(**req_copy) + + def test_list_collections_value_error_with_retries(self): + # Enable retries and run test_list_collections_value_error. + _service.enable_retries() + self.test_list_collections_value_error() + + # Disable retries and run test_list_collections_value_error. + _service.disable_retries() + self.test_list_collections_value_error() + + +class TestCreateCollection: + """ + Test Class for create_collection + """ + + @responses.activate + def test_create_collection_all_params(self): + """ + create_collection() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections') + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a CollectionEnrichment model + collection_enrichment_model = {} + collection_enrichment_model['enrichment_id'] = 'testString' + collection_enrichment_model['fields'] = ['testString'] + + # Set up parameter values + project_id = 'testString' + name = 'testString' + description = 'testString' + language = 'en' + ocr_enabled = False + enrichments = [collection_enrichment_model] + + # Invoke method + response = _service.create_collection( + project_id, + name, + description=description, + language=language, + ocr_enabled=ocr_enabled, + enrichments=enrichments, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['language'] == 'en' + assert req_body['ocr_enabled'] == False + assert req_body['enrichments'] == [collection_enrichment_model] + + def test_create_collection_all_params_with_retries(self): + # Enable retries and run test_create_collection_all_params. + _service.enable_retries() + self.test_create_collection_all_params() + + # Disable retries and run test_create_collection_all_params. 
+ _service.disable_retries() + self.test_create_collection_all_params() + + @responses.activate + def test_create_collection_value_error(self): + """ + test_create_collection_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections') + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a CollectionEnrichment model + collection_enrichment_model = {} + collection_enrichment_model['enrichment_id'] = 'testString' + collection_enrichment_model['fields'] = ['testString'] + + # Set up parameter values + project_id = 'testString' + name = 'testString' + description = 'testString' + language = 'en' + ocr_enabled = False + enrichments = [collection_enrichment_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "name": name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_collection(**req_copy) + + def test_create_collection_value_error_with_retries(self): + # Enable retries and run test_create_collection_value_error. + _service.enable_retries() + self.test_create_collection_value_error() + + # Disable retries and run test_create_collection_value_error. + _service.disable_retries() + self.test_create_collection_value_error() + + +class TestGetCollection: + """ + Test Class for get_collection + """ + + @responses.activate + def test_get_collection_all_params(self): + """ + get_collection() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString') + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.get_collection( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_collection_all_params_with_retries(self): + # Enable retries and run test_get_collection_all_params. + _service.enable_retries() + self.test_get_collection_all_params() + + # Disable retries and run test_get_collection_all_params. 
+ _service.disable_retries() + self.test_get_collection_all_params() + + @responses.activate + def test_get_collection_value_error(self): + """ + test_get_collection_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString') + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_collection(**req_copy) + + def test_get_collection_value_error_with_retries(self): + # Enable retries and run test_get_collection_value_error. + _service.enable_retries() + self.test_get_collection_value_error() + + # Disable retries and run test_get_collection_value_error. + _service.disable_retries() + self.test_get_collection_value_error() + + +class TestUpdateCollection: + """ + Test Class for update_collection + """ + + @responses.activate + def test_update_collection_all_params(self): + """ + update_collection() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString') + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a CollectionEnrichment model + collection_enrichment_model = {} + collection_enrichment_model['enrichment_id'] = 'testString' + collection_enrichment_model['fields'] = ['testString'] + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + name = 'testString' + description = 'testString' + ocr_enabled = False + enrichments = [collection_enrichment_model] + + # Invoke method + response = _service.update_collection( + project_id, + collection_id, + name=name, + description=description, + ocr_enabled=ocr_enabled, + enrichments=enrichments, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['ocr_enabled'] == False + assert req_body['enrichments'] == [collection_enrichment_model] + + def test_update_collection_all_params_with_retries(self): + # Enable retries and run test_update_collection_all_params. + _service.enable_retries() + self.test_update_collection_all_params() + + # Disable retries and run test_update_collection_all_params. 
+ _service.disable_retries() + self.test_update_collection_all_params() + + @responses.activate + def test_update_collection_value_error(self): + """ + test_update_collection_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString') + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a CollectionEnrichment model + collection_enrichment_model = {} + collection_enrichment_model['enrichment_id'] = 'testString' + collection_enrichment_model['fields'] = ['testString'] + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + name = 'testString' + description = 'testString' + ocr_enabled = False + enrichments = [collection_enrichment_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_collection(**req_copy) + + def test_update_collection_value_error_with_retries(self): + # Enable retries and run test_update_collection_value_error. + _service.enable_retries() + self.test_update_collection_value_error() + + # Disable retries and run test_update_collection_value_error. + _service.disable_retries() + self.test_update_collection_value_error() + + +class TestDeleteCollection: + """ + Test Class for delete_collection + """ + + @responses.activate + def test_delete_collection_all_params(self): + """ + delete_collection() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.delete_collection( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_collection_all_params_with_retries(self): + # Enable retries and run test_delete_collection_all_params. + _service.enable_retries() + self.test_delete_collection_all_params() + + # Disable retries and run test_delete_collection_all_params. 
+ _service.disable_retries() + self.test_delete_collection_all_params() + + @responses.activate + def test_delete_collection_value_error(self): + """ + test_delete_collection_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_collection(**req_copy) + + def test_delete_collection_value_error_with_retries(self): + # Enable retries and run test_delete_collection_value_error. + _service.enable_retries() + self.test_delete_collection_value_error() + + # Disable retries and run test_delete_collection_value_error. + _service.disable_retries() + self.test_delete_collection_value_error() + + +# endregion +############################################################################## +# End of Service: Collections +############################################################################## + +############################################################################## +# Start of Service: Documents +############################################################################## +# region + + +class TestListDocuments: + """ + Test Class for list_documents + """ + + @responses.activate + def test_list_documents_all_params(self): + """ + list_documents() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents') + mock_response = '{"matching_results": 16, "documents": [{"document_id": "document_id", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "available", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "children": {"have_notices": true, "count": 5}, "filename": "filename", "file_type": "file_type", "sha256": "sha256"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + count = 1000 + status = 'testString' + has_notices = True + is_parent = True + parent_document_id = 'testString' + sha256 = 'testString' + + # Invoke method + response = _service.list_documents( + project_id, + collection_id, + count=count, + status=status, + has_notices=has_notices, + is_parent=is_parent, + parent_document_id=parent_document_id, + sha256=sha256, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'count={}'.format(count) in query_string + assert 'status={}'.format(status) in query_string + assert 'has_notices={}'.format('true' if has_notices else 'false') in query_string + assert 'is_parent={}'.format('true' if is_parent else 'false') in query_string + assert 'parent_document_id={}'.format(parent_document_id) in query_string 
+ assert 'sha256={}'.format(sha256) in query_string + + def test_list_documents_all_params_with_retries(self): + # Enable retries and run test_list_documents_all_params. + _service.enable_retries() + self.test_list_documents_all_params() + + # Disable retries and run test_list_documents_all_params. + _service.disable_retries() + self.test_list_documents_all_params() + + @responses.activate + def test_list_documents_required_params(self): + """ + test_list_documents_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents') + mock_response = '{"matching_results": 16, "documents": [{"document_id": "document_id", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "available", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "children": {"have_notices": true, "count": 5}, "filename": "filename", "file_type": "file_type", "sha256": "sha256"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.list_documents( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_documents_required_params_with_retries(self): + # Enable retries and run test_list_documents_required_params. + _service.enable_retries() + self.test_list_documents_required_params() + + # Disable retries and run test_list_documents_required_params. + _service.disable_retries() + self.test_list_documents_required_params() + + @responses.activate + def test_list_documents_value_error(self): + """ + test_list_documents_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents') + mock_response = '{"matching_results": 16, "documents": [{"document_id": "document_id", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "available", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "children": {"have_notices": true, "count": 5}, "filename": "filename", "file_type": "file_type", "sha256": "sha256"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_documents(**req_copy) + + def test_list_documents_value_error_with_retries(self): + # Enable retries and run test_list_documents_value_error. + _service.enable_retries() + self.test_list_documents_value_error() + + # Disable retries and run test_list_documents_value_error. 
+ _service.disable_retries() + self.test_list_documents_value_error() + + +class TestAddDocument: + """ + Test Class for add_document + """ + + @responses.activate + def test_add_document_all_params(self): + """ + add_document() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents') + mock_response = '{"document_id": "document_id", "status": "processing"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + file = io.BytesIO(b'This is a mock file.').getvalue() + filename = 'testString' + file_content_type = 'application/json' + metadata = 'testString' + x_watson_discovery_force = False + + # Invoke method + response = _service.add_document( + project_id, + collection_id, + file=file, + filename=filename, + file_content_type=file_content_type, + metadata=metadata, + x_watson_discovery_force=x_watson_discovery_force, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + + def test_add_document_all_params_with_retries(self): + # Enable retries and run test_add_document_all_params. + _service.enable_retries() + self.test_add_document_all_params() + + # Disable retries and run test_add_document_all_params. + _service.disable_retries() + self.test_add_document_all_params() + + @responses.activate + def test_add_document_required_params(self): + """ + test_add_document_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents') + mock_response = '{"document_id": "document_id", "status": "processing"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.add_document( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + + def test_add_document_required_params_with_retries(self): + # Enable retries and run test_add_document_required_params. + _service.enable_retries() + self.test_add_document_required_params() + + # Disable retries and run test_add_document_required_params. + _service.disable_retries() + self.test_add_document_required_params() + + @responses.activate + def test_add_document_value_error(self): + """ + test_add_document_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents') + mock_response = '{"document_id": "document_id", "status": "processing"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.add_document(**req_copy) + + def test_add_document_value_error_with_retries(self): + # Enable retries and run test_add_document_value_error. 
+ _service.enable_retries() + self.test_add_document_value_error() + + # Disable retries and run test_add_document_value_error. + _service.disable_retries() + self.test_add_document_value_error() + + +class TestGetDocument: + """ + Test Class for get_document + """ + + @responses.activate + def test_get_document_all_params(self): + """ + get_document() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString') + mock_response = '{"document_id": "document_id", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "available", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "children": {"have_notices": true, "count": 5}, "filename": "filename", "file_type": "file_type", "sha256": "sha256"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + document_id = 'testString' + + # Invoke method + response = _service.get_document( + project_id, + collection_id, + document_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_document_all_params_with_retries(self): + # Enable retries and run test_get_document_all_params. + _service.enable_retries() + self.test_get_document_all_params() + + # Disable retries and run test_get_document_all_params. + _service.disable_retries() + self.test_get_document_all_params() + + @responses.activate + def test_get_document_value_error(self): + """ + test_get_document_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString') + mock_response = '{"document_id": "document_id", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "available", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "children": {"have_notices": true, "count": 5}, "filename": "filename", "file_type": "file_type", "sha256": "sha256"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + document_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + "document_id": document_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_document(**req_copy) + + def test_get_document_value_error_with_retries(self): + # Enable retries and run test_get_document_value_error. + _service.enable_retries() + self.test_get_document_value_error() + + # Disable retries and run test_get_document_value_error. 
+ _service.disable_retries() + self.test_get_document_value_error() + + +class TestUpdateDocument: + """ + Test Class for update_document + """ + + @responses.activate + def test_update_document_all_params(self): + """ + update_document() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString') + mock_response = '{"document_id": "document_id", "status": "processing"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + document_id = 'testString' + file = io.BytesIO(b'This is a mock file.').getvalue() + filename = 'testString' + file_content_type = 'application/json' + metadata = 'testString' + x_watson_discovery_force = False + + # Invoke method + response = _service.update_document( + project_id, + collection_id, + document_id, + file=file, + filename=filename, + file_content_type=file_content_type, + metadata=metadata, + x_watson_discovery_force=x_watson_discovery_force, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + + def test_update_document_all_params_with_retries(self): + # Enable retries and run test_update_document_all_params. + _service.enable_retries() + self.test_update_document_all_params() + + # Disable retries and run test_update_document_all_params. + _service.disable_retries() + self.test_update_document_all_params() + + @responses.activate + def test_update_document_required_params(self): + """ + test_update_document_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString') + mock_response = '{"document_id": "document_id", "status": "processing"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + document_id = 'testString' + + # Invoke method + response = _service.update_document( + project_id, + collection_id, + document_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + + def test_update_document_required_params_with_retries(self): + # Enable retries and run test_update_document_required_params. + _service.enable_retries() + self.test_update_document_required_params() + + # Disable retries and run test_update_document_required_params. 
+ _service.disable_retries() + self.test_update_document_required_params() + + @responses.activate + def test_update_document_value_error(self): + """ + test_update_document_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString') + mock_response = '{"document_id": "document_id", "status": "processing"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + document_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + "document_id": document_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_document(**req_copy) + + def test_update_document_value_error_with_retries(self): + # Enable retries and run test_update_document_value_error. + _service.enable_retries() + self.test_update_document_value_error() + + # Disable retries and run test_update_document_value_error. + _service.disable_retries() + self.test_update_document_value_error() + + +class TestDeleteDocument: + """ + Test Class for delete_document + """ + + @responses.activate + def test_delete_document_all_params(self): + """ + delete_document() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString') + mock_response = '{"document_id": "document_id", "status": "deleted"}' + responses.add( + responses.DELETE, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + document_id = 'testString' + x_watson_discovery_force = False + + # Invoke method + response = _service.delete_document( + project_id, + collection_id, + document_id, + x_watson_discovery_force=x_watson_discovery_force, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_document_all_params_with_retries(self): + # Enable retries and run test_delete_document_all_params. + _service.enable_retries() + self.test_delete_document_all_params() + + # Disable retries and run test_delete_document_all_params. + _service.disable_retries() + self.test_delete_document_all_params() + + @responses.activate + def test_delete_document_required_params(self): + """ + test_delete_document_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString') + mock_response = '{"document_id": "document_id", "status": "deleted"}' + responses.add( + responses.DELETE, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + document_id = 'testString' + + # Invoke method + response = _service.delete_document( + project_id, + collection_id, + document_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_document_required_params_with_retries(self): + # Enable retries and run test_delete_document_required_params. 
+ _service.enable_retries() + self.test_delete_document_required_params() + + # Disable retries and run test_delete_document_required_params. + _service.disable_retries() + self.test_delete_document_required_params() + + @responses.activate + def test_delete_document_value_error(self): + """ + test_delete_document_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString') + mock_response = '{"document_id": "document_id", "status": "deleted"}' + responses.add( + responses.DELETE, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + document_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + "document_id": document_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_document(**req_copy) + + def test_delete_document_value_error_with_retries(self): + # Enable retries and run test_delete_document_value_error. + _service.enable_retries() + self.test_delete_document_value_error() + + # Disable retries and run test_delete_document_value_error. + _service.disable_retries() + self.test_delete_document_value_error() + + +# endregion +############################################################################## +# End of Service: Documents +############################################################################## + +############################################################################## +# Start of Service: Queries +############################################################################## +# region + + +class TestQuery: + """ + Test Class for query + """ + + @responses.activate + def test_query_all_params(self): + """ + query() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/query') + mock_response = '{"matching_results": 16, "results": [{"document_id": "document_id", "metadata": {"anyKey": "anyValue"}, "result_metadata": {"document_retrieval_source": "search", "collection_id": "collection_id", "confidence": 0}, "document_passages": [{"passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field", "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}], "aggregations": [{"type": "term", "field": "field", "count": 5, "name": "name", "results": [{"key": "key", "matching_results": 16, "relevancy": 9, "total_matching_documents": 24, "estimated_matching_results": 26, "aggregations": [{"anyKey": "anyValue"}]}]}], "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query", "suggested_refinements": [{"text": "text"}], "table_results": [{"table_id": "table_id", "source_document_id": "source_document_id", "collection_id": "collection_id", "table_html": "table_html", "table_html_offset": 17, "table": {"location": {"begin": 5, "end": 3}, "text": "text", "section_title": {"text": "text", "location": {"begin": 5, "end": 3}}, "title": {"text": "text", "location": {"begin": 5, "end": 3}}, "table_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "row_headers": [{"cell_id": "cell_id", 
"location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "column_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "key_value_pairs": [{"key": {"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}, "value": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}]}], "body_cells": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16, "row_header_ids": ["row_header_ids"], "row_header_texts": ["row_header_texts"], "row_header_texts_normalized": ["row_header_texts_normalized"], "column_header_ids": ["column_header_ids"], "column_header_texts": ["column_header_texts"], "column_header_texts_normalized": ["column_header_texts_normalized"], "attributes": [{"type": "type", "text": "text", "location": {"begin": 5, "end": 3}}]}], "contexts": [{"text": "text", "location": {"begin": 5, "end": 3}}]}}], "passages": [{"passage_text": "passage_text", "passage_score": 13, "document_id": "document_id", "collection_id": "collection_id", "start_offset": 12, "end_offset": 10, "field": "field", "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a QueryLargeTableResults model + query_large_table_results_model = {} + query_large_table_results_model['enabled'] = True + query_large_table_results_model['count'] = 38 + + # Construct a dict representation of a QueryLargeSuggestedRefinements model + query_large_suggested_refinements_model = {} + query_large_suggested_refinements_model['enabled'] = True + query_large_suggested_refinements_model['count'] = 1 + + # Construct a dict representation of a QueryLargePassages model + query_large_passages_model = {} + query_large_passages_model['enabled'] = True + query_large_passages_model['per_document'] = True + query_large_passages_model['max_per_document'] = 38 + query_large_passages_model['fields'] = ['testString'] + query_large_passages_model['count'] = 400 + query_large_passages_model['characters'] = 50 + query_large_passages_model['find_answers'] = False + query_large_passages_model['max_answers_per_passage'] = 1 + + # Construct a dict representation of a QueryLargeSimilar model + query_large_similar_model = {} + query_large_similar_model['enabled'] = False + query_large_similar_model['document_ids'] = ['testString'] + query_large_similar_model['fields'] = ['testString'] + + # Set up parameter values + project_id = 'testString' + collection_ids = ['testString'] + filter = 'testString' + query = 'testString' + natural_language_query = 'testString' + aggregation = 'testString' + count = 38 + return_ = ['testString'] + offset = 38 + sort = 'testString' + highlight = True + spelling_suggestions = True + table_results = query_large_table_results_model + suggested_refinements = query_large_suggested_refinements_model + passages = query_large_passages_model + similar = query_large_similar_model + + # Invoke method + response = _service.query( + project_id, + collection_ids=collection_ids, + filter=filter, + query=query, + 
natural_language_query=natural_language_query, + aggregation=aggregation, + count=count, + return_=return_, + offset=offset, + sort=sort, + highlight=highlight, + spelling_suggestions=spelling_suggestions, + table_results=table_results, + suggested_refinements=suggested_refinements, + passages=passages, + similar=similar, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['collection_ids'] == ['testString'] + assert req_body['filter'] == 'testString' + assert req_body['query'] == 'testString' + assert req_body['natural_language_query'] == 'testString' + assert req_body['aggregation'] == 'testString' + assert req_body['count'] == 38 + assert req_body['return'] == ['testString'] + assert req_body['offset'] == 38 + assert req_body['sort'] == 'testString' + assert req_body['highlight'] == True + assert req_body['spelling_suggestions'] == True + assert req_body['table_results'] == query_large_table_results_model + assert req_body['suggested_refinements'] == query_large_suggested_refinements_model + assert req_body['passages'] == query_large_passages_model + assert req_body['similar'] == query_large_similar_model + + def test_query_all_params_with_retries(self): + # Enable retries and run test_query_all_params. + _service.enable_retries() + self.test_query_all_params() + + # Disable retries and run test_query_all_params. + _service.disable_retries() + self.test_query_all_params() + + @responses.activate + def test_query_required_params(self): + """ + test_query_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/query') + mock_response = '{"matching_results": 16, "results": [{"document_id": "document_id", "metadata": {"anyKey": "anyValue"}, "result_metadata": {"document_retrieval_source": "search", "collection_id": "collection_id", "confidence": 0}, "document_passages": [{"passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field", "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}], "aggregations": [{"type": "term", "field": "field", "count": 5, "name": "name", "results": [{"key": "key", "matching_results": 16, "relevancy": 9, "total_matching_documents": 24, "estimated_matching_results": 26, "aggregations": [{"anyKey": "anyValue"}]}]}], "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query", "suggested_refinements": [{"text": "text"}], "table_results": [{"table_id": "table_id", "source_document_id": "source_document_id", "collection_id": "collection_id", "table_html": "table_html", "table_html_offset": 17, "table": {"location": {"begin": 5, "end": 3}, "text": "text", "section_title": {"text": "text", "location": {"begin": 5, "end": 3}}, "title": {"text": "text", "location": {"begin": 5, "end": 3}}, "table_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "row_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "column_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", 
"row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "key_value_pairs": [{"key": {"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}, "value": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}]}], "body_cells": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16, "row_header_ids": ["row_header_ids"], "row_header_texts": ["row_header_texts"], "row_header_texts_normalized": ["row_header_texts_normalized"], "column_header_ids": ["column_header_ids"], "column_header_texts": ["column_header_texts"], "column_header_texts_normalized": ["column_header_texts_normalized"], "attributes": [{"type": "type", "text": "text", "location": {"begin": 5, "end": 3}}]}], "contexts": [{"text": "text", "location": {"begin": 5, "end": 3}}]}}], "passages": [{"passage_text": "passage_text", "passage_score": 13, "document_id": "document_id", "collection_id": "collection_id", "start_offset": 12, "end_offset": 10, "field": "field", "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.query( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_query_required_params_with_retries(self): + # Enable retries and run test_query_required_params. + _service.enable_retries() + self.test_query_required_params() + + # Disable retries and run test_query_required_params. 
+ _service.disable_retries() + self.test_query_required_params() + + @responses.activate + def test_query_value_error(self): + """ + test_query_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/query') + mock_response = '{"matching_results": 16, "results": [{"document_id": "document_id", "metadata": {"anyKey": "anyValue"}, "result_metadata": {"document_retrieval_source": "search", "collection_id": "collection_id", "confidence": 0}, "document_passages": [{"passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field", "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}], "aggregations": [{"type": "term", "field": "field", "count": 5, "name": "name", "results": [{"key": "key", "matching_results": 16, "relevancy": 9, "total_matching_documents": 24, "estimated_matching_results": 26, "aggregations": [{"anyKey": "anyValue"}]}]}], "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query", "suggested_refinements": [{"text": "text"}], "table_results": [{"table_id": "table_id", "source_document_id": "source_document_id", "collection_id": "collection_id", "table_html": "table_html", "table_html_offset": 17, "table": {"location": {"begin": 5, "end": 3}, "text": "text", "section_title": {"text": "text", "location": {"begin": 5, "end": 3}}, "title": {"text": "text", "location": {"begin": 5, "end": 3}}, "table_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "row_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "column_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "key_value_pairs": [{"key": {"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}, "value": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}]}], "body_cells": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16, "row_header_ids": ["row_header_ids"], "row_header_texts": ["row_header_texts"], "row_header_texts_normalized": ["row_header_texts_normalized"], "column_header_ids": ["column_header_ids"], "column_header_texts": ["column_header_texts"], "column_header_texts_normalized": ["column_header_texts_normalized"], "attributes": [{"type": "type", "text": "text", "location": {"begin": 5, "end": 3}}]}], "contexts": [{"text": "text", "location": {"begin": 5, "end": 3}}]}}], "passages": [{"passage_text": "passage_text", "passage_score": 13, "document_id": "document_id", "collection_id": "collection_id", "start_offset": 12, "end_offset": 10, "field": "field", "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in 
req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.query(**req_copy) + + def test_query_value_error_with_retries(self): + # Enable retries and run test_query_value_error. + _service.enable_retries() + self.test_query_value_error() + + # Disable retries and run test_query_value_error. + _service.disable_retries() + self.test_query_value_error() + + +class TestGetAutocompletion: + """ + Test Class for get_autocompletion + """ + + @responses.activate + def test_get_autocompletion_all_params(self): + """ + get_autocompletion() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/autocompletion') + mock_response = '{"completions": ["completions"]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + prefix = 'testString' + collection_ids = ['testString'] + field = 'testString' + count = 5 + + # Invoke method + response = _service.get_autocompletion( + project_id, + prefix, + collection_ids=collection_ids, + field=field, + count=count, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'prefix={}'.format(prefix) in query_string + assert 'collection_ids={}'.format(','.join(collection_ids)) in query_string + assert 'field={}'.format(field) in query_string + assert 'count={}'.format(count) in query_string + + def test_get_autocompletion_all_params_with_retries(self): + # Enable retries and run test_get_autocompletion_all_params. + _service.enable_retries() + self.test_get_autocompletion_all_params() + + # Disable retries and run test_get_autocompletion_all_params. + _service.disable_retries() + self.test_get_autocompletion_all_params() + + @responses.activate + def test_get_autocompletion_required_params(self): + """ + test_get_autocompletion_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/autocompletion') + mock_response = '{"completions": ["completions"]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + prefix = 'testString' + + # Invoke method + response = _service.get_autocompletion( + project_id, + prefix, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'prefix={}'.format(prefix) in query_string + + def test_get_autocompletion_required_params_with_retries(self): + # Enable retries and run test_get_autocompletion_required_params. + _service.enable_retries() + self.test_get_autocompletion_required_params() + + # Disable retries and run test_get_autocompletion_required_params. 
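get_autocompletion() takes the prefix as its second positional argument, as the tests above show; a hedged sketch with the same placeholder client and IDs:

# Sketch only: placeholder client and IDs, as in the earlier examples.
autocomplete_result = discovery.get_autocompletion(
    'YOUR_PROJECT_ID',
    'exa',
    collection_ids=['YOUR_COLLECTION_ID'],
    count=5,
).get_result()
print(autocomplete_result.get('completions', []))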
+ _service.disable_retries() + self.test_get_autocompletion_required_params() + + @responses.activate + def test_get_autocompletion_value_error(self): + """ + test_get_autocompletion_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/autocompletion') + mock_response = '{"completions": ["completions"]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + prefix = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "prefix": prefix, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_autocompletion(**req_copy) + + def test_get_autocompletion_value_error_with_retries(self): + # Enable retries and run test_get_autocompletion_value_error. + _service.enable_retries() + self.test_get_autocompletion_value_error() + + # Disable retries and run test_get_autocompletion_value_error. + _service.disable_retries() + self.test_get_autocompletion_value_error() + + +class TestQueryCollectionNotices: + """ + Test Class for query_collection_notices + """ + + @responses.activate + def test_query_collection_notices_all_params(self): + """ + query_collection_notices() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/notices') + mock_response = '{"matching_results": 16, "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + filter = 'testString' + query = 'testString' + natural_language_query = 'testString' + count = 10 + offset = 38 + + # Invoke method + response = _service.query_collection_notices( + project_id, + collection_id, + filter=filter, + query=query, + natural_language_query=natural_language_query, + count=count, + offset=offset, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'filter={}'.format(filter) in query_string + assert 'query={}'.format(query) in query_string + assert 'natural_language_query={}'.format(natural_language_query) in query_string + assert 'count={}'.format(count) in query_string + assert 'offset={}'.format(offset) in query_string + + def test_query_collection_notices_all_params_with_retries(self): + # Enable retries and run test_query_collection_notices_all_params. + _service.enable_retries() + self.test_query_collection_notices_all_params() + + # Disable retries and run test_query_collection_notices_all_params. 
+ _service.disable_retries() + self.test_query_collection_notices_all_params() + + @responses.activate + def test_query_collection_notices_required_params(self): + """ + test_query_collection_notices_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/notices') + mock_response = '{"matching_results": 16, "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.query_collection_notices( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_query_collection_notices_required_params_with_retries(self): + # Enable retries and run test_query_collection_notices_required_params. + _service.enable_retries() + self.test_query_collection_notices_required_params() + + # Disable retries and run test_query_collection_notices_required_params. + _service.disable_retries() + self.test_query_collection_notices_required_params() + + @responses.activate + def test_query_collection_notices_value_error(self): + """ + test_query_collection_notices_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/notices') + mock_response = '{"matching_results": 16, "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.query_collection_notices(**req_copy) + + def test_query_collection_notices_value_error_with_retries(self): + # Enable retries and run test_query_collection_notices_value_error. + _service.enable_retries() + self.test_query_collection_notices_value_error() + + # Disable retries and run test_query_collection_notices_value_error. 
+ _service.disable_retries() + self.test_query_collection_notices_value_error() + + +class TestQueryNotices: + """ + Test Class for query_notices + """ + + @responses.activate + def test_query_notices_all_params(self): + """ + query_notices() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/notices') + mock_response = '{"matching_results": 16, "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + filter = 'testString' + query = 'testString' + natural_language_query = 'testString' + count = 10 + offset = 38 + + # Invoke method + response = _service.query_notices( + project_id, + filter=filter, + query=query, + natural_language_query=natural_language_query, + count=count, + offset=offset, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'filter={}'.format(filter) in query_string + assert 'query={}'.format(query) in query_string + assert 'natural_language_query={}'.format(natural_language_query) in query_string + assert 'count={}'.format(count) in query_string + assert 'offset={}'.format(offset) in query_string + + def test_query_notices_all_params_with_retries(self): + # Enable retries and run test_query_notices_all_params. + _service.enable_retries() + self.test_query_notices_all_params() + + # Disable retries and run test_query_notices_all_params. + _service.disable_retries() + self.test_query_notices_all_params() + + @responses.activate + def test_query_notices_required_params(self): + """ + test_query_notices_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/notices') + mock_response = '{"matching_results": 16, "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.query_notices( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_query_notices_required_params_with_retries(self): + # Enable retries and run test_query_notices_required_params. + _service.enable_retries() + self.test_query_notices_required_params() + + # Disable retries and run test_query_notices_required_params. 
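query_notices() and query_collection_notices(), tested above, surface ingestion and query warnings; a minimal sketch under the same placeholder assumptions:

# Sketch only: placeholder client and project ID.
notices_result = discovery.query_notices(
    'YOUR_PROJECT_ID',
    count=10,
).get_result()
for notice in notices_result.get('notices', []):
    print(notice['severity'], notice['description'])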
+ _service.disable_retries() + self.test_query_notices_required_params() + + @responses.activate + def test_query_notices_value_error(self): + """ + test_query_notices_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/notices') + mock_response = '{"matching_results": 16, "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.query_notices(**req_copy) + + def test_query_notices_value_error_with_retries(self): + # Enable retries and run test_query_notices_value_error. + _service.enable_retries() + self.test_query_notices_value_error() + + # Disable retries and run test_query_notices_value_error. + _service.disable_retries() + self.test_query_notices_value_error() + + +# endregion +############################################################################## +# End of Service: Queries +############################################################################## + +############################################################################## +# Start of Service: QueryModifications +############################################################################## +# region + + +class TestGetStopwordList: + """ + Test Class for get_stopword_list + """ + + @responses.activate + def test_get_stopword_list_all_params(self): + """ + get_stopword_list() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/stopwords') + mock_response = '{"stopwords": ["stopwords"]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.get_stopword_list( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_stopword_list_all_params_with_retries(self): + # Enable retries and run test_get_stopword_list_all_params. + _service.enable_retries() + self.test_get_stopword_list_all_params() + + # Disable retries and run test_get_stopword_list_all_params. 
+ _service.disable_retries() + self.test_get_stopword_list_all_params() + + @responses.activate + def test_get_stopword_list_value_error(self): + """ + test_get_stopword_list_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/stopwords') + mock_response = '{"stopwords": ["stopwords"]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_stopword_list(**req_copy) + + def test_get_stopword_list_value_error_with_retries(self): + # Enable retries and run test_get_stopword_list_value_error. + _service.enable_retries() + self.test_get_stopword_list_value_error() + + # Disable retries and run test_get_stopword_list_value_error. + _service.disable_retries() + self.test_get_stopword_list_value_error() + + +class TestCreateStopwordList: + """ + Test Class for create_stopword_list + """ + + @responses.activate + def test_create_stopword_list_all_params(self): + """ + create_stopword_list() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/stopwords') + mock_response = '{"stopwords": ["stopwords"]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + stopwords = ['testString'] + + # Invoke method + response = _service.create_stopword_list( + project_id, + collection_id, + stopwords=stopwords, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['stopwords'] == ['testString'] + + def test_create_stopword_list_all_params_with_retries(self): + # Enable retries and run test_create_stopword_list_all_params. + _service.enable_retries() + self.test_create_stopword_list_all_params() + + # Disable retries and run test_create_stopword_list_all_params. + _service.disable_retries() + self.test_create_stopword_list_all_params() + + @responses.activate + def test_create_stopword_list_required_params(self): + """ + test_create_stopword_list_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/stopwords') + mock_response = '{"stopwords": ["stopwords"]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.create_stopword_list( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_create_stopword_list_required_params_with_retries(self): + # Enable retries and run test_create_stopword_list_required_params. 
+ _service.enable_retries() + self.test_create_stopword_list_required_params() + + # Disable retries and run test_create_stopword_list_required_params. + _service.disable_retries() + self.test_create_stopword_list_required_params() + + @responses.activate + def test_create_stopword_list_value_error(self): + """ + test_create_stopword_list_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/stopwords') + mock_response = '{"stopwords": ["stopwords"]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_stopword_list(**req_copy) + + def test_create_stopword_list_value_error_with_retries(self): + # Enable retries and run test_create_stopword_list_value_error. + _service.enable_retries() + self.test_create_stopword_list_value_error() + + # Disable retries and run test_create_stopword_list_value_error. + _service.disable_retries() + self.test_create_stopword_list_value_error() + + +class TestDeleteStopwordList: + """ + Test Class for delete_stopword_list + """ + + @responses.activate + def test_delete_stopword_list_all_params(self): + """ + delete_stopword_list() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/stopwords') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.delete_stopword_list( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_stopword_list_all_params_with_retries(self): + # Enable retries and run test_delete_stopword_list_all_params. + _service.enable_retries() + self.test_delete_stopword_list_all_params() + + # Disable retries and run test_delete_stopword_list_all_params. + _service.disable_retries() + self.test_delete_stopword_list_all_params() + + @responses.activate + def test_delete_stopword_list_value_error(self): + """ + test_delete_stopword_list_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/stopwords') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_stopword_list(**req_copy) + + def test_delete_stopword_list_value_error_with_retries(self): + # Enable retries and run test_delete_stopword_list_value_error. + _service.enable_retries() + self.test_delete_stopword_list_value_error() + + # Disable retries and run test_delete_stopword_list_value_error. 
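create_stopword_list() accepts the stopwords as a plain list of strings, as exercised above; a sketch with the same placeholder client and IDs:

# Sketch only: placeholder client and IDs.
stopword_result = discovery.create_stopword_list(
    'YOUR_PROJECT_ID',
    'YOUR_COLLECTION_ID',
    stopwords=['a', 'an', 'the'],
).get_result()
print(stopword_result.get('stopwords', []))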
+ _service.disable_retries() + self.test_delete_stopword_list_value_error() + + +class TestListExpansions: + """ + Test Class for list_expansions + """ + + @responses.activate + def test_list_expansions_all_params(self): + """ + list_expansions() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/expansions') + mock_response = '{"expansions": [{"input_terms": ["input_terms"], "expanded_terms": ["expanded_terms"]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.list_expansions( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_expansions_all_params_with_retries(self): + # Enable retries and run test_list_expansions_all_params. + _service.enable_retries() + self.test_list_expansions_all_params() + + # Disable retries and run test_list_expansions_all_params. + _service.disable_retries() + self.test_list_expansions_all_params() + + @responses.activate + def test_list_expansions_value_error(self): + """ + test_list_expansions_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/expansions') + mock_response = '{"expansions": [{"input_terms": ["input_terms"], "expanded_terms": ["expanded_terms"]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_expansions(**req_copy) + + def test_list_expansions_value_error_with_retries(self): + # Enable retries and run test_list_expansions_value_error. + _service.enable_retries() + self.test_list_expansions_value_error() + + # Disable retries and run test_list_expansions_value_error. 
+ _service.disable_retries() + self.test_list_expansions_value_error() + + +class TestCreateExpansions: + """ + Test Class for create_expansions + """ + + @responses.activate + def test_create_expansions_all_params(self): + """ + create_expansions() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/expansions') + mock_response = '{"expansions": [{"input_terms": ["input_terms"], "expanded_terms": ["expanded_terms"]}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a Expansion model + expansion_model = {} + expansion_model['input_terms'] = ['testString'] + expansion_model['expanded_terms'] = ['testString'] + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + expansions = [expansion_model] + + # Invoke method + response = _service.create_expansions( + project_id, + collection_id, + expansions, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['expansions'] == [expansion_model] + + def test_create_expansions_all_params_with_retries(self): + # Enable retries and run test_create_expansions_all_params. + _service.enable_retries() + self.test_create_expansions_all_params() + + # Disable retries and run test_create_expansions_all_params. + _service.disable_retries() + self.test_create_expansions_all_params() + + @responses.activate + def test_create_expansions_value_error(self): + """ + test_create_expansions_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/expansions') + mock_response = '{"expansions": [{"input_terms": ["input_terms"], "expanded_terms": ["expanded_terms"]}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a Expansion model + expansion_model = {} + expansion_model['input_terms'] = ['testString'] + expansion_model['expanded_terms'] = ['testString'] + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + expansions = [expansion_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + "expansions": expansions, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_expansions(**req_copy) + + def test_create_expansions_value_error_with_retries(self): + # Enable retries and run test_create_expansions_value_error. + _service.enable_retries() + self.test_create_expansions_value_error() + + # Disable retries and run test_create_expansions_value_error. 
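create_expansions() takes a list of expansion dicts shaped like the Expansion model used in the tests above (input_terms plus expanded_terms); a sketch under the same placeholder assumptions:

# Sketch only: placeholder client and IDs; the dict mirrors the test's Expansion model.
expansions_result = discovery.create_expansions(
    'YOUR_PROJECT_ID',
    'YOUR_COLLECTION_ID',
    [{'input_terms': ['car'], 'expanded_terms': ['car', 'automobile', 'vehicle']}],
).get_result()
print(expansions_result.get('expansions', []))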
+ _service.disable_retries() + self.test_create_expansions_value_error() + + +class TestDeleteExpansions: + """ + Test Class for delete_expansions + """ + + @responses.activate + def test_delete_expansions_all_params(self): + """ + delete_expansions() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/expansions') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.delete_expansions( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_expansions_all_params_with_retries(self): + # Enable retries and run test_delete_expansions_all_params. + _service.enable_retries() + self.test_delete_expansions_all_params() + + # Disable retries and run test_delete_expansions_all_params. + _service.disable_retries() + self.test_delete_expansions_all_params() + + @responses.activate + def test_delete_expansions_value_error(self): + """ + test_delete_expansions_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/expansions') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_expansions(**req_copy) + + def test_delete_expansions_value_error_with_retries(self): + # Enable retries and run test_delete_expansions_value_error. + _service.enable_retries() + self.test_delete_expansions_value_error() + + # Disable retries and run test_delete_expansions_value_error. 
+ _service.disable_retries() + self.test_delete_expansions_value_error() + + +# endregion +############################################################################## +# End of Service: QueryModifications +############################################################################## + +############################################################################## +# Start of Service: ComponentSettings +############################################################################## +# region + + +class TestGetComponentSettings: + """ + Test Class for get_component_settings + """ + + @responses.activate + def test_get_component_settings_all_params(self): + """ + get_component_settings() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/component_settings') + mock_response = '{"fields_shown": {"body": {"use_passage": false, "field": "field"}, "title": {"field": "field"}}, "autocomplete": true, "structured_search": false, "results_per_page": 16, "aggregations": [{"name": "name", "label": "label", "multiple_selections_allowed": false, "visualization_type": "auto"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.get_component_settings( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_component_settings_all_params_with_retries(self): + # Enable retries and run test_get_component_settings_all_params. + _service.enable_retries() + self.test_get_component_settings_all_params() + + # Disable retries and run test_get_component_settings_all_params. + _service.disable_retries() + self.test_get_component_settings_all_params() + + @responses.activate + def test_get_component_settings_value_error(self): + """ + test_get_component_settings_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/component_settings') + mock_response = '{"fields_shown": {"body": {"use_passage": false, "field": "field"}, "title": {"field": "field"}}, "autocomplete": true, "structured_search": false, "results_per_page": 16, "aggregations": [{"name": "name", "label": "label", "multiple_selections_allowed": false, "visualization_type": "auto"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_component_settings(**req_copy) + + def test_get_component_settings_value_error_with_retries(self): + # Enable retries and run test_get_component_settings_value_error. + _service.enable_retries() + self.test_get_component_settings_value_error() + + # Disable retries and run test_get_component_settings_value_error. 
+ _service.disable_retries() + self.test_get_component_settings_value_error() + + +# endregion +############################################################################## +# End of Service: ComponentSettings +############################################################################## + +############################################################################## +# Start of Service: TrainingData +############################################################################## +# region + + +class TestListTrainingQueries: + """ + Test Class for list_training_queries + """ + + @responses.activate + def test_list_training_queries_all_params(self): + """ + list_training_queries() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries') + mock_response = '{"queries": [{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.list_training_queries( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_training_queries_all_params_with_retries(self): + # Enable retries and run test_list_training_queries_all_params. + _service.enable_retries() + self.test_list_training_queries_all_params() + + # Disable retries and run test_list_training_queries_all_params. + _service.disable_retries() + self.test_list_training_queries_all_params() + + @responses.activate + def test_list_training_queries_value_error(self): + """ + test_list_training_queries_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries') + mock_response = '{"queries": [{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_training_queries(**req_copy) + + def test_list_training_queries_value_error_with_retries(self): + # Enable retries and run test_list_training_queries_value_error. + _service.enable_retries() + self.test_list_training_queries_value_error() + + # Disable retries and run test_list_training_queries_value_error. 
+ _service.disable_retries() + self.test_list_training_queries_value_error() + + +class TestDeleteTrainingQueries: + """ + Test Class for delete_training_queries + """ + + @responses.activate + def test_delete_training_queries_all_params(self): + """ + delete_training_queries() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.delete_training_queries( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_training_queries_all_params_with_retries(self): + # Enable retries and run test_delete_training_queries_all_params. + _service.enable_retries() + self.test_delete_training_queries_all_params() + + # Disable retries and run test_delete_training_queries_all_params. + _service.disable_retries() + self.test_delete_training_queries_all_params() + + @responses.activate + def test_delete_training_queries_value_error(self): + """ + test_delete_training_queries_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_training_queries(**req_copy) + + def test_delete_training_queries_value_error_with_retries(self): + # Enable retries and run test_delete_training_queries_value_error. + _service.enable_retries() + self.test_delete_training_queries_value_error() + + # Disable retries and run test_delete_training_queries_value_error. 
+ _service.disable_retries() + self.test_delete_training_queries_value_error() + + +class TestCreateTrainingQuery: + """ + Test Class for create_training_query + """ + + @responses.activate + def test_create_training_query_all_params(self): + """ + create_training_query() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries') + mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a TrainingExample model + training_example_model = {} + training_example_model['document_id'] = 'testString' + training_example_model['collection_id'] = 'testString' + training_example_model['relevance'] = 38 + + # Set up parameter values + project_id = 'testString' + natural_language_query = 'testString' + examples = [training_example_model] + filter = 'testString' + + # Invoke method + response = _service.create_training_query( + project_id, + natural_language_query, + examples, + filter=filter, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['natural_language_query'] == 'testString' + assert req_body['examples'] == [training_example_model] + assert req_body['filter'] == 'testString' + + def test_create_training_query_all_params_with_retries(self): + # Enable retries and run test_create_training_query_all_params. + _service.enable_retries() + self.test_create_training_query_all_params() + + # Disable retries and run test_create_training_query_all_params. 
+ _service.disable_retries() + self.test_create_training_query_all_params() + + @responses.activate + def test_create_training_query_value_error(self): + """ + test_create_training_query_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries') + mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a TrainingExample model + training_example_model = {} + training_example_model['document_id'] = 'testString' + training_example_model['collection_id'] = 'testString' + training_example_model['relevance'] = 38 + + # Set up parameter values + project_id = 'testString' + natural_language_query = 'testString' + examples = [training_example_model] + filter = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "natural_language_query": natural_language_query, + "examples": examples, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_training_query(**req_copy) + + def test_create_training_query_value_error_with_retries(self): + # Enable retries and run test_create_training_query_value_error. + _service.enable_retries() + self.test_create_training_query_value_error() + + # Disable retries and run test_create_training_query_value_error. + _service.disable_retries() + self.test_create_training_query_value_error() + + +class TestGetTrainingQuery: + """ + Test Class for get_training_query + """ + + @responses.activate + def test_get_training_query_all_params(self): + """ + get_training_query() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries/testString') + mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + query_id = 'testString' + + # Invoke method + response = _service.get_training_query( + project_id, + query_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_training_query_all_params_with_retries(self): + # Enable retries and run test_get_training_query_all_params. + _service.enable_retries() + self.test_get_training_query_all_params() + + # Disable retries and run test_get_training_query_all_params. 
+ _service.disable_retries() + self.test_get_training_query_all_params() + + @responses.activate + def test_get_training_query_value_error(self): + """ + test_get_training_query_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries/testString') + mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + query_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "query_id": query_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_training_query(**req_copy) + + def test_get_training_query_value_error_with_retries(self): + # Enable retries and run test_get_training_query_value_error. + _service.enable_retries() + self.test_get_training_query_value_error() + + # Disable retries and run test_get_training_query_value_error. + _service.disable_retries() + self.test_get_training_query_value_error() + + +class TestUpdateTrainingQuery: + """ + Test Class for update_training_query + """ + + @responses.activate + def test_update_training_query_all_params(self): + """ + update_training_query() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries/testString') + mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a TrainingExample model + training_example_model = {} + training_example_model['document_id'] = 'testString' + training_example_model['collection_id'] = 'testString' + training_example_model['relevance'] = 38 + + # Set up parameter values + project_id = 'testString' + query_id = 'testString' + natural_language_query = 'testString' + examples = [training_example_model] + filter = 'testString' + + # Invoke method + response = _service.update_training_query( + project_id, + query_id, + natural_language_query, + examples, + filter=filter, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['natural_language_query'] == 'testString' + assert req_body['examples'] == [training_example_model] + assert req_body['filter'] == 'testString' + + def test_update_training_query_all_params_with_retries(self): + # Enable retries and run test_update_training_query_all_params. 
+ _service.enable_retries() + self.test_update_training_query_all_params() + + # Disable retries and run test_update_training_query_all_params. + _service.disable_retries() + self.test_update_training_query_all_params() + + @responses.activate + def test_update_training_query_value_error(self): + """ + test_update_training_query_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries/testString') + mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a TrainingExample model + training_example_model = {} + training_example_model['document_id'] = 'testString' + training_example_model['collection_id'] = 'testString' + training_example_model['relevance'] = 38 + + # Set up parameter values + project_id = 'testString' + query_id = 'testString' + natural_language_query = 'testString' + examples = [training_example_model] + filter = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "query_id": query_id, + "natural_language_query": natural_language_query, + "examples": examples, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_training_query(**req_copy) + + def test_update_training_query_value_error_with_retries(self): + # Enable retries and run test_update_training_query_value_error. + _service.enable_retries() + self.test_update_training_query_value_error() + + # Disable retries and run test_update_training_query_value_error. + _service.disable_retries() + self.test_update_training_query_value_error() + + +class TestDeleteTrainingQuery: + """ + Test Class for delete_training_query + """ + + @responses.activate + def test_delete_training_query_all_params(self): + """ + delete_training_query() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + query_id = 'testString' + + # Invoke method + response = _service.delete_training_query( + project_id, + query_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_training_query_all_params_with_retries(self): + # Enable retries and run test_delete_training_query_all_params. + _service.enable_retries() + self.test_delete_training_query_all_params() + + # Disable retries and run test_delete_training_query_all_params. 
+ _service.disable_retries() + self.test_delete_training_query_all_params() + + @responses.activate + def test_delete_training_query_value_error(self): + """ + test_delete_training_query_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/training_data/queries/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + query_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "query_id": query_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_training_query(**req_copy) + + def test_delete_training_query_value_error_with_retries(self): + # Enable retries and run test_delete_training_query_value_error. + _service.enable_retries() + self.test_delete_training_query_value_error() + + # Disable retries and run test_delete_training_query_value_error. + _service.disable_retries() + self.test_delete_training_query_value_error() + + +# endregion +############################################################################## +# End of Service: TrainingData +############################################################################## + +############################################################################## +# Start of Service: Enrichments +############################################################################## +# region + + +class TestListEnrichments: + """ + Test Class for list_enrichments + """ + + @responses.activate + def test_list_enrichments_all_params(self): + """ + list_enrichments() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/enrichments') + mock_response = '{"enrichments": [{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.list_enrichments( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_enrichments_all_params_with_retries(self): + # Enable retries and run test_list_enrichments_all_params. + _service.enable_retries() + self.test_list_enrichments_all_params() + + # Disable retries and run test_list_enrichments_all_params. 
+ _service.disable_retries() + self.test_list_enrichments_all_params() + + @responses.activate + def test_list_enrichments_value_error(self): + """ + test_list_enrichments_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/enrichments') + mock_response = '{"enrichments": [{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_enrichments(**req_copy) + + def test_list_enrichments_value_error_with_retries(self): + # Enable retries and run test_list_enrichments_value_error. + _service.enable_retries() + self.test_list_enrichments_value_error() + + # Disable retries and run test_list_enrichments_value_error. + _service.disable_retries() + self.test_list_enrichments_value_error() + + +class TestCreateEnrichment: + """ + Test Class for create_enrichment + """ + + @responses.activate + def test_create_enrichment_all_params(self): + """ + create_enrichment() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/enrichments') + mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a WebhookHeader model + webhook_header_model = {} + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + # Construct a dict representation of a EnrichmentOptions model + enrichment_options_model = {} + enrichment_options_model['languages'] = ['testString'] + enrichment_options_model['entity_type'] = 'testString' + enrichment_options_model['regular_expression'] = 'testString' + enrichment_options_model['result_field'] = 'testString' + enrichment_options_model['classifier_id'] = 'testString' + enrichment_options_model['model_id'] = 'testString' + enrichment_options_model['confidence_threshold'] = 0 + enrichment_options_model['top_k'] = 0 + enrichment_options_model['url'] = 'testString' + enrichment_options_model['version'] = '2023-03-31' + enrichment_options_model['secret'] = 'testString' + enrichment_options_model['headers'] = webhook_header_model + enrichment_options_model['location_encoding'] = '`utf-16`' + + # Construct a dict 
representation of a CreateEnrichment model + create_enrichment_model = {} + create_enrichment_model['name'] = 'testString' + create_enrichment_model['description'] = 'testString' + create_enrichment_model['type'] = 'classifier' + create_enrichment_model['options'] = enrichment_options_model + + # Set up parameter values + project_id = 'testString' + enrichment = create_enrichment_model + file = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.create_enrichment( + project_id, + enrichment, + file=file, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_create_enrichment_all_params_with_retries(self): + # Enable retries and run test_create_enrichment_all_params. + _service.enable_retries() + self.test_create_enrichment_all_params() + + # Disable retries and run test_create_enrichment_all_params. + _service.disable_retries() + self.test_create_enrichment_all_params() + + @responses.activate + def test_create_enrichment_required_params(self): + """ + test_create_enrichment_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/enrichments') + mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a WebhookHeader model + webhook_header_model = {} + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + # Construct a dict representation of a EnrichmentOptions model + enrichment_options_model = {} + enrichment_options_model['languages'] = ['testString'] + enrichment_options_model['entity_type'] = 'testString' + enrichment_options_model['regular_expression'] = 'testString' + enrichment_options_model['result_field'] = 'testString' + enrichment_options_model['classifier_id'] = 'testString' + enrichment_options_model['model_id'] = 'testString' + enrichment_options_model['confidence_threshold'] = 0 + enrichment_options_model['top_k'] = 0 + enrichment_options_model['url'] = 'testString' + enrichment_options_model['version'] = '2023-03-31' + enrichment_options_model['secret'] = 'testString' + enrichment_options_model['headers'] = webhook_header_model + enrichment_options_model['location_encoding'] = '`utf-16`' + + # Construct a dict representation of a CreateEnrichment model + create_enrichment_model = {} + create_enrichment_model['name'] = 'testString' + create_enrichment_model['description'] = 'testString' + create_enrichment_model['type'] = 'classifier' + create_enrichment_model['options'] = enrichment_options_model + + # Set up parameter values + project_id = 'testString' + enrichment = create_enrichment_model + + # Invoke method + response = _service.create_enrichment( + project_id, + enrichment, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_create_enrichment_required_params_with_retries(self): + # Enable retries 
and run test_create_enrichment_required_params. + _service.enable_retries() + self.test_create_enrichment_required_params() + + # Disable retries and run test_create_enrichment_required_params. + _service.disable_retries() + self.test_create_enrichment_required_params() + + @responses.activate + def test_create_enrichment_value_error(self): + """ + test_create_enrichment_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/enrichments') + mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a WebhookHeader model + webhook_header_model = {} + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + # Construct a dict representation of a EnrichmentOptions model + enrichment_options_model = {} + enrichment_options_model['languages'] = ['testString'] + enrichment_options_model['entity_type'] = 'testString' + enrichment_options_model['regular_expression'] = 'testString' + enrichment_options_model['result_field'] = 'testString' + enrichment_options_model['classifier_id'] = 'testString' + enrichment_options_model['model_id'] = 'testString' + enrichment_options_model['confidence_threshold'] = 0 + enrichment_options_model['top_k'] = 0 + enrichment_options_model['url'] = 'testString' + enrichment_options_model['version'] = '2023-03-31' + enrichment_options_model['secret'] = 'testString' + enrichment_options_model['headers'] = webhook_header_model + enrichment_options_model['location_encoding'] = '`utf-16`' + + # Construct a dict representation of a CreateEnrichment model + create_enrichment_model = {} + create_enrichment_model['name'] = 'testString' + create_enrichment_model['description'] = 'testString' + create_enrichment_model['type'] = 'classifier' + create_enrichment_model['options'] = enrichment_options_model + + # Set up parameter values + project_id = 'testString' + enrichment = create_enrichment_model + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "enrichment": enrichment, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_enrichment(**req_copy) + + def test_create_enrichment_value_error_with_retries(self): + # Enable retries and run test_create_enrichment_value_error. + _service.enable_retries() + self.test_create_enrichment_value_error() + + # Disable retries and run test_create_enrichment_value_error. 
+ _service.disable_retries() + self.test_create_enrichment_value_error() + + +class TestGetEnrichment: + """ + Test Class for get_enrichment + """ + + @responses.activate + def test_get_enrichment_all_params(self): + """ + get_enrichment() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/enrichments/testString') + mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + enrichment_id = 'testString' + + # Invoke method + response = _service.get_enrichment( + project_id, + enrichment_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_enrichment_all_params_with_retries(self): + # Enable retries and run test_get_enrichment_all_params. + _service.enable_retries() + self.test_get_enrichment_all_params() + + # Disable retries and run test_get_enrichment_all_params. + _service.disable_retries() + self.test_get_enrichment_all_params() + + @responses.activate + def test_get_enrichment_value_error(self): + """ + test_get_enrichment_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/enrichments/testString') + mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + enrichment_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "enrichment_id": enrichment_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_enrichment(**req_copy) + + def test_get_enrichment_value_error_with_retries(self): + # Enable retries and run test_get_enrichment_value_error. + _service.enable_retries() + self.test_get_enrichment_value_error() + + # Disable retries and run test_get_enrichment_value_error. 
+ _service.disable_retries() + self.test_get_enrichment_value_error() + + +class TestUpdateEnrichment: + """ + Test Class for update_enrichment + """ + + @responses.activate + def test_update_enrichment_all_params(self): + """ + update_enrichment() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/enrichments/testString') + mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + enrichment_id = 'testString' + name = 'testString' + description = 'testString' + + # Invoke method + response = _service.update_enrichment( + project_id, + enrichment_id, + name, + description=description, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + + def test_update_enrichment_all_params_with_retries(self): + # Enable retries and run test_update_enrichment_all_params. + _service.enable_retries() + self.test_update_enrichment_all_params() + + # Disable retries and run test_update_enrichment_all_params. + _service.disable_retries() + self.test_update_enrichment_all_params() + + @responses.activate + def test_update_enrichment_value_error(self): + """ + test_update_enrichment_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/enrichments/testString') + mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + enrichment_id = 'testString' + name = 'testString' + description = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "enrichment_id": enrichment_id, + "name": name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_enrichment(**req_copy) + + def test_update_enrichment_value_error_with_retries(self): + # Enable retries and run test_update_enrichment_value_error. + _service.enable_retries() + self.test_update_enrichment_value_error() + + # Disable retries and run test_update_enrichment_value_error. 
+ _service.disable_retries() + self.test_update_enrichment_value_error() + + +class TestDeleteEnrichment: + """ + Test Class for delete_enrichment + """ + + @responses.activate + def test_delete_enrichment_all_params(self): + """ + delete_enrichment() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/enrichments/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + enrichment_id = 'testString' + + # Invoke method + response = _service.delete_enrichment( + project_id, + enrichment_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_enrichment_all_params_with_retries(self): + # Enable retries and run test_delete_enrichment_all_params. + _service.enable_retries() + self.test_delete_enrichment_all_params() + + # Disable retries and run test_delete_enrichment_all_params. + _service.disable_retries() + self.test_delete_enrichment_all_params() + + @responses.activate + def test_delete_enrichment_value_error(self): + """ + test_delete_enrichment_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/enrichments/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + enrichment_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "enrichment_id": enrichment_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_enrichment(**req_copy) + + def test_delete_enrichment_value_error_with_retries(self): + # Enable retries and run test_delete_enrichment_value_error. + _service.enable_retries() + self.test_delete_enrichment_value_error() + + # Disable retries and run test_delete_enrichment_value_error. + _service.disable_retries() + self.test_delete_enrichment_value_error() + + +# endregion +############################################################################## +# End of Service: Enrichments +############################################################################## + +############################################################################## +# Start of Service: Batches +############################################################################## +# region + + +class TestListBatches: + """ + Test Class for list_batches + """ + + @responses.activate + def test_list_batches_all_params(self): + """ + list_batches() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/batches') + mock_response = '{"batches": [{"batch_id": "batch_id", "created": "2019-01-01T12:00:00.000Z", "enrichment_id": "enrichment_id"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.list_batches( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_batches_all_params_with_retries(self): + # Enable retries and run test_list_batches_all_params. 
+ _service.enable_retries() + self.test_list_batches_all_params() + + # Disable retries and run test_list_batches_all_params. + _service.disable_retries() + self.test_list_batches_all_params() + + @responses.activate + def test_list_batches_value_error(self): + """ + test_list_batches_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/batches') + mock_response = '{"batches": [{"batch_id": "batch_id", "created": "2019-01-01T12:00:00.000Z", "enrichment_id": "enrichment_id"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_batches(**req_copy) + + def test_list_batches_value_error_with_retries(self): + # Enable retries and run test_list_batches_value_error. + _service.enable_retries() + self.test_list_batches_value_error() + + # Disable retries and run test_list_batches_value_error. + _service.disable_retries() + self.test_list_batches_value_error() + + +class TestPullBatches: + """ + Test Class for pull_batches + """ + + @responses.activate + def test_pull_batches_all_params(self): + """ + pull_batches() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/batches/testString') + mock_response = '{"file": "file"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + batch_id = 'testString' + + # Invoke method + response = _service.pull_batches( + project_id, + collection_id, + batch_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_pull_batches_all_params_with_retries(self): + # Enable retries and run test_pull_batches_all_params. + _service.enable_retries() + self.test_pull_batches_all_params() + + # Disable retries and run test_pull_batches_all_params. + _service.disable_retries() + self.test_pull_batches_all_params() + + @responses.activate + def test_pull_batches_value_error(self): + """ + test_pull_batches_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/batches/testString') + mock_response = '{"file": "file"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + batch_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + "batch_id": batch_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.pull_batches(**req_copy) + + def test_pull_batches_value_error_with_retries(self): + # Enable retries and run test_pull_batches_value_error. 
+ _service.enable_retries() + self.test_pull_batches_value_error() + + # Disable retries and run test_pull_batches_value_error. + _service.disable_retries() + self.test_pull_batches_value_error() + + +class TestPushBatches: + """ + Test Class for push_batches + """ + + @responses.activate + def test_push_batches_all_params(self): + """ + push_batches() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/batches/testString') + mock_response = 'false' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + batch_id = 'testString' + file = io.BytesIO(b'This is a mock file.').getvalue() + filename = 'testString' + + # Invoke method + response = _service.push_batches( + project_id, + collection_id, + batch_id, + file=file, + filename=filename, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + + def test_push_batches_all_params_with_retries(self): + # Enable retries and run test_push_batches_all_params. + _service.enable_retries() + self.test_push_batches_all_params() + + # Disable retries and run test_push_batches_all_params. + _service.disable_retries() + self.test_push_batches_all_params() + + @responses.activate + def test_push_batches_required_params(self): + """ + test_push_batches_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/batches/testString') + mock_response = 'false' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + batch_id = 'testString' + + # Invoke method + response = _service.push_batches( + project_id, + collection_id, + batch_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 202 + + def test_push_batches_required_params_with_retries(self): + # Enable retries and run test_push_batches_required_params. + _service.enable_retries() + self.test_push_batches_required_params() + + # Disable retries and run test_push_batches_required_params. + _service.disable_retries() + self.test_push_batches_required_params() + + @responses.activate + def test_push_batches_value_error(self): + """ + test_push_batches_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/batches/testString') + mock_response = 'false' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=202, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + batch_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + "batch_id": batch_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.push_batches(**req_copy) + + def test_push_batches_value_error_with_retries(self): + # Enable retries and run test_push_batches_value_error. + _service.enable_retries() + self.test_push_batches_value_error() + + # Disable retries and run test_push_batches_value_error. 
+ _service.disable_retries() + self.test_push_batches_value_error() + + +# endregion +############################################################################## +# End of Service: Batches +############################################################################## + +############################################################################## +# Start of Service: DocumentClassifiers +############################################################################## +# region + + +class TestListDocumentClassifiers: + """ + Test Class for list_document_classifiers + """ + + @responses.activate + def test_list_document_classifiers_all_params(self): + """ + list_document_classifiers() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers') + mock_response = '{"classifiers": [{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Invoke method + response = _service.list_document_classifiers( + project_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_document_classifiers_all_params_with_retries(self): + # Enable retries and run test_list_document_classifiers_all_params. + _service.enable_retries() + self.test_list_document_classifiers_all_params() + + # Disable retries and run test_list_document_classifiers_all_params. + _service.disable_retries() + self.test_list_document_classifiers_all_params() + + @responses.activate + def test_list_document_classifiers_value_error(self): + """ + test_list_document_classifiers_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers') + mock_response = '{"classifiers": [{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_document_classifiers(**req_copy) + + def test_list_document_classifiers_value_error_with_retries(self): + # Enable retries and run test_list_document_classifiers_value_error. + _service.enable_retries() + self.test_list_document_classifiers_value_error() + + # Disable retries and run test_list_document_classifiers_value_error. 
+ _service.disable_retries() + self.test_list_document_classifiers_value_error() + + +class TestCreateDocumentClassifier: + """ + Test Class for create_document_classifier + """ + + @responses.activate + def test_create_document_classifier_all_params(self): + """ + create_document_classifier() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers') + mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a DocumentClassifierEnrichment model + document_classifier_enrichment_model = {} + document_classifier_enrichment_model['enrichment_id'] = 'testString' + document_classifier_enrichment_model['fields'] = ['testString'] + + # Construct a dict representation of a ClassifierFederatedModel model + classifier_federated_model_model = {} + classifier_federated_model_model['field'] = 'testString' + + # Construct a dict representation of a CreateDocumentClassifier model + create_document_classifier_model = {} + create_document_classifier_model['name'] = 'testString' + create_document_classifier_model['description'] = 'testString' + create_document_classifier_model['language'] = 'en' + create_document_classifier_model['answer_field'] = 'testString' + create_document_classifier_model['enrichments'] = [document_classifier_enrichment_model] + create_document_classifier_model['federated_classification'] = classifier_federated_model_model + + # Set up parameter values + project_id = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + classifier = create_document_classifier_model + test_data = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.create_document_classifier( + project_id, + training_data, + classifier, + test_data=test_data, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_create_document_classifier_all_params_with_retries(self): + # Enable retries and run test_create_document_classifier_all_params. + _service.enable_retries() + self.test_create_document_classifier_all_params() + + # Disable retries and run test_create_document_classifier_all_params. 
+ _service.disable_retries() + self.test_create_document_classifier_all_params() + + @responses.activate + def test_create_document_classifier_required_params(self): + """ + test_create_document_classifier_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers') + mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a DocumentClassifierEnrichment model + document_classifier_enrichment_model = {} + document_classifier_enrichment_model['enrichment_id'] = 'testString' + document_classifier_enrichment_model['fields'] = ['testString'] + + # Construct a dict representation of a ClassifierFederatedModel model + classifier_federated_model_model = {} + classifier_federated_model_model['field'] = 'testString' + + # Construct a dict representation of a CreateDocumentClassifier model + create_document_classifier_model = {} + create_document_classifier_model['name'] = 'testString' + create_document_classifier_model['description'] = 'testString' + create_document_classifier_model['language'] = 'en' + create_document_classifier_model['answer_field'] = 'testString' + create_document_classifier_model['enrichments'] = [document_classifier_enrichment_model] + create_document_classifier_model['federated_classification'] = classifier_federated_model_model + + # Set up parameter values + project_id = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + classifier = create_document_classifier_model + + # Invoke method + response = _service.create_document_classifier( + project_id, + training_data, + classifier, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_create_document_classifier_required_params_with_retries(self): + # Enable retries and run test_create_document_classifier_required_params. + _service.enable_retries() + self.test_create_document_classifier_required_params() + + # Disable retries and run test_create_document_classifier_required_params. 
+ _service.disable_retries() + self.test_create_document_classifier_required_params() + + @responses.activate + def test_create_document_classifier_value_error(self): + """ + test_create_document_classifier_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers') + mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a DocumentClassifierEnrichment model + document_classifier_enrichment_model = {} + document_classifier_enrichment_model['enrichment_id'] = 'testString' + document_classifier_enrichment_model['fields'] = ['testString'] + + # Construct a dict representation of a ClassifierFederatedModel model + classifier_federated_model_model = {} + classifier_federated_model_model['field'] = 'testString' + + # Construct a dict representation of a CreateDocumentClassifier model + create_document_classifier_model = {} + create_document_classifier_model['name'] = 'testString' + create_document_classifier_model['description'] = 'testString' + create_document_classifier_model['language'] = 'en' + create_document_classifier_model['answer_field'] = 'testString' + create_document_classifier_model['enrichments'] = [document_classifier_enrichment_model] + create_document_classifier_model['federated_classification'] = classifier_federated_model_model + + # Set up parameter values + project_id = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + classifier = create_document_classifier_model + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "training_data": training_data, + "classifier": classifier, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_document_classifier(**req_copy) + + def test_create_document_classifier_value_error_with_retries(self): + # Enable retries and run test_create_document_classifier_value_error. + _service.enable_retries() + self.test_create_document_classifier_value_error() + + # Disable retries and run test_create_document_classifier_value_error. 
+ _service.disable_retries() + self.test_create_document_classifier_value_error() + + +class TestGetDocumentClassifier: + """ + Test Class for get_document_classifier + """ + + @responses.activate + def test_get_document_classifier_all_params(self): + """ + get_document_classifier() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString') + mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + + # Invoke method + response = _service.get_document_classifier( + project_id, + classifier_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_document_classifier_all_params_with_retries(self): + # Enable retries and run test_get_document_classifier_all_params. + _service.enable_retries() + self.test_get_document_classifier_all_params() + + # Disable retries and run test_get_document_classifier_all_params. + _service.disable_retries() + self.test_get_document_classifier_all_params() + + @responses.activate + def test_get_document_classifier_value_error(self): + """ + test_get_document_classifier_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString') + mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "classifier_id": classifier_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_document_classifier(**req_copy) + + def test_get_document_classifier_value_error_with_retries(self): + # Enable retries and run test_get_document_classifier_value_error. + _service.enable_retries() + self.test_get_document_classifier_value_error() + + # Disable retries and run test_get_document_classifier_value_error. 
+ _service.disable_retries() + self.test_get_document_classifier_value_error() + + +class TestUpdateDocumentClassifier: + """ + Test Class for update_document_classifier + """ + + @responses.activate + def test_update_document_classifier_all_params(self): + """ + update_document_classifier() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString') + mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a UpdateDocumentClassifier model + update_document_classifier_model = {} + update_document_classifier_model['name'] = 'testString' + update_document_classifier_model['description'] = 'testString' + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + classifier = update_document_classifier_model + training_data = io.BytesIO(b'This is a mock file.').getvalue() + test_data = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.update_document_classifier( + project_id, + classifier_id, + classifier, + training_data=training_data, + test_data=test_data, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_update_document_classifier_all_params_with_retries(self): + # Enable retries and run test_update_document_classifier_all_params. + _service.enable_retries() + self.test_update_document_classifier_all_params() + + # Disable retries and run test_update_document_classifier_all_params. 
+ _service.disable_retries() + self.test_update_document_classifier_all_params() + + @responses.activate + def test_update_document_classifier_required_params(self): + """ + test_update_document_classifier_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString') + mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a UpdateDocumentClassifier model + update_document_classifier_model = {} + update_document_classifier_model['name'] = 'testString' + update_document_classifier_model['description'] = 'testString' + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + classifier = update_document_classifier_model + + # Invoke method + response = _service.update_document_classifier( + project_id, + classifier_id, + classifier, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_update_document_classifier_required_params_with_retries(self): + # Enable retries and run test_update_document_classifier_required_params. + _service.enable_retries() + self.test_update_document_classifier_required_params() + + # Disable retries and run test_update_document_classifier_required_params. 
+ _service.disable_retries() + self.test_update_document_classifier_required_params() + + @responses.activate + def test_update_document_classifier_value_error(self): + """ + test_update_document_classifier_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString') + mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a UpdateDocumentClassifier model + update_document_classifier_model = {} + update_document_classifier_model['name'] = 'testString' + update_document_classifier_model['description'] = 'testString' + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + classifier = update_document_classifier_model + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "classifier_id": classifier_id, + "classifier": classifier, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_document_classifier(**req_copy) + + def test_update_document_classifier_value_error_with_retries(self): + # Enable retries and run test_update_document_classifier_value_error. + _service.enable_retries() + self.test_update_document_classifier_value_error() + + # Disable retries and run test_update_document_classifier_value_error. + _service.disable_retries() + self.test_update_document_classifier_value_error() + + +class TestDeleteDocumentClassifier: + """ + Test Class for delete_document_classifier + """ + + @responses.activate + def test_delete_document_classifier_all_params(self): + """ + delete_document_classifier() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + + # Invoke method + response = _service.delete_document_classifier( + project_id, + classifier_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_document_classifier_all_params_with_retries(self): + # Enable retries and run test_delete_document_classifier_all_params. + _service.enable_retries() + self.test_delete_document_classifier_all_params() + + # Disable retries and run test_delete_document_classifier_all_params. 
+ _service.disable_retries() + self.test_delete_document_classifier_all_params() + + @responses.activate + def test_delete_document_classifier_value_error(self): + """ + test_delete_document_classifier_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "classifier_id": classifier_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_document_classifier(**req_copy) + + def test_delete_document_classifier_value_error_with_retries(self): + # Enable retries and run test_delete_document_classifier_value_error. + _service.enable_retries() + self.test_delete_document_classifier_value_error() + + # Disable retries and run test_delete_document_classifier_value_error. + _service.disable_retries() + self.test_delete_document_classifier_value_error() + + +# endregion +############################################################################## +# End of Service: DocumentClassifiers +############################################################################## + +############################################################################## +# Start of Service: DocumentClassifierModels +############################################################################## +# region + + +class TestListDocumentClassifierModels: + """ + Test Class for list_document_classifier_models + """ + + @responses.activate + def test_list_document_classifier_models_all_params(self): + """ + list_document_classifier_models() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models') + mock_response = '{"models": [{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + + # Invoke method + response = _service.list_document_classifier_models( + project_id, + classifier_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_document_classifier_models_all_params_with_retries(self): + # Enable retries and run test_list_document_classifier_models_all_params. + _service.enable_retries() + self.test_list_document_classifier_models_all_params() + + # Disable retries and run test_list_document_classifier_models_all_params. 
+ _service.disable_retries() + self.test_list_document_classifier_models_all_params() + + @responses.activate + def test_list_document_classifier_models_value_error(self): + """ + test_list_document_classifier_models_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models') + mock_response = '{"models": [{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "classifier_id": classifier_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_document_classifier_models(**req_copy) + + def test_list_document_classifier_models_value_error_with_retries(self): + # Enable retries and run test_list_document_classifier_models_value_error. + _service.enable_retries() + self.test_list_document_classifier_models_value_error() + + # Disable retries and run test_list_document_classifier_models_value_error. 
+ _service.disable_retries() + self.test_list_document_classifier_models_value_error() + + +class TestCreateDocumentClassifierModel: + """ + Test Class for create_document_classifier_model + """ + + @responses.activate + def test_create_document_classifier_model_all_params(self): + """ + create_document_classifier_model() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models') + mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + name = 'testString' + description = 'testString' + learning_rate = 0.1 + l1_regularization_strengths = [1.0E-6] + l2_regularization_strengths = [1.0E-6] + training_max_steps = 10000000 + improvement_ratio = 0.000010 + + # Invoke method + response = _service.create_document_classifier_model( + project_id, + classifier_id, + name, + description=description, + learning_rate=learning_rate, + l1_regularization_strengths=l1_regularization_strengths, + l2_regularization_strengths=l2_regularization_strengths, + training_max_steps=training_max_steps, + improvement_ratio=improvement_ratio, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['learning_rate'] == 0.1 + assert req_body['l1_regularization_strengths'] == [1.0E-6] + assert req_body['l2_regularization_strengths'] == [1.0E-6] + assert req_body['training_max_steps'] == 10000000 + assert req_body['improvement_ratio'] == 0.000010 + + def test_create_document_classifier_model_all_params_with_retries(self): + # Enable retries and run test_create_document_classifier_model_all_params. + _service.enable_retries() + self.test_create_document_classifier_model_all_params() + + # Disable retries and run test_create_document_classifier_model_all_params. 
+ _service.disable_retries() + self.test_create_document_classifier_model_all_params() + + @responses.activate + def test_create_document_classifier_model_value_error(self): + """ + test_create_document_classifier_model_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models') + mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + name = 'testString' + description = 'testString' + learning_rate = 0.1 + l1_regularization_strengths = [1.0E-6] + l2_regularization_strengths = [1.0E-6] + training_max_steps = 10000000 + improvement_ratio = 0.000010 + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "classifier_id": classifier_id, + "name": name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_document_classifier_model(**req_copy) + + def test_create_document_classifier_model_value_error_with_retries(self): + # Enable retries and run test_create_document_classifier_model_value_error. + _service.enable_retries() + self.test_create_document_classifier_model_value_error() + + # Disable retries and run test_create_document_classifier_model_value_error. 
+ _service.disable_retries() + self.test_create_document_classifier_model_value_error() + + +class TestGetDocumentClassifierModel: + """ + Test Class for get_document_classifier_model + """ + + @responses.activate + def test_get_document_classifier_model_all_params(self): + """ + get_document_classifier_model() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString') + mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + model_id = 'testString' + + # Invoke method + response = _service.get_document_classifier_model( + project_id, + classifier_id, + model_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_document_classifier_model_all_params_with_retries(self): + # Enable retries and run test_get_document_classifier_model_all_params. + _service.enable_retries() + self.test_get_document_classifier_model_all_params() + + # Disable retries and run test_get_document_classifier_model_all_params. + _service.disable_retries() + self.test_get_document_classifier_model_all_params() + + @responses.activate + def test_get_document_classifier_model_value_error(self): + """ + test_get_document_classifier_model_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString') + mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + model_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "classifier_id": classifier_id, + "model_id": model_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_document_classifier_model(**req_copy) + + def test_get_document_classifier_model_value_error_with_retries(self): + # Enable retries and run test_get_document_classifier_model_value_error. 
+ _service.enable_retries() + self.test_get_document_classifier_model_value_error() + + # Disable retries and run test_get_document_classifier_model_value_error. + _service.disable_retries() + self.test_get_document_classifier_model_value_error() + + +class TestUpdateDocumentClassifierModel: + """ + Test Class for update_document_classifier_model + """ + + @responses.activate + def test_update_document_classifier_model_all_params(self): + """ + update_document_classifier_model() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString') + mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + model_id = 'testString' + name = 'testString' + description = 'testString' + + # Invoke method + response = _service.update_document_classifier_model( + project_id, + classifier_id, + model_id, + name=name, + description=description, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + + def test_update_document_classifier_model_all_params_with_retries(self): + # Enable retries and run test_update_document_classifier_model_all_params. + _service.enable_retries() + self.test_update_document_classifier_model_all_params() + + # Disable retries and run test_update_document_classifier_model_all_params. 
+ _service.disable_retries() + self.test_update_document_classifier_model_all_params() + + @responses.activate + def test_update_document_classifier_model_value_error(self): + """ + test_update_document_classifier_model_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString') + mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + model_id = 'testString' + name = 'testString' + description = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "classifier_id": classifier_id, + "model_id": model_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_document_classifier_model(**req_copy) + + def test_update_document_classifier_model_value_error_with_retries(self): + # Enable retries and run test_update_document_classifier_model_value_error. + _service.enable_retries() + self.test_update_document_classifier_model_value_error() + + # Disable retries and run test_update_document_classifier_model_value_error. + _service.disable_retries() + self.test_update_document_classifier_model_value_error() + + +class TestDeleteDocumentClassifierModel: + """ + Test Class for delete_document_classifier_model + """ + + @responses.activate + def test_delete_document_classifier_model_all_params(self): + """ + delete_document_classifier_model() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + model_id = 'testString' + + # Invoke method + response = _service.delete_document_classifier_model( + project_id, + classifier_id, + model_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_document_classifier_model_all_params_with_retries(self): + # Enable retries and run test_delete_document_classifier_model_all_params. + _service.enable_retries() + self.test_delete_document_classifier_model_all_params() + + # Disable retries and run test_delete_document_classifier_model_all_params. 
+ _service.disable_retries() + self.test_delete_document_classifier_model_all_params() + + @responses.activate + def test_delete_document_classifier_model_value_error(self): + """ + test_delete_document_classifier_model_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + project_id = 'testString' + classifier_id = 'testString' + model_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "classifier_id": classifier_id, + "model_id": model_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_document_classifier_model(**req_copy) + + def test_delete_document_classifier_model_value_error_with_retries(self): + # Enable retries and run test_delete_document_classifier_model_value_error. + _service.enable_retries() + self.test_delete_document_classifier_model_value_error() + + # Disable retries and run test_delete_document_classifier_model_value_error. + _service.disable_retries() + self.test_delete_document_classifier_model_value_error() + + +# endregion +############################################################################## +# End of Service: DocumentClassifierModels +############################################################################## + +############################################################################## +# Start of Service: Analyze +############################################################################## +# region + + +class TestAnalyzeDocument: + """ + Test Class for analyze_document + """ + + @responses.activate + def test_analyze_document_all_params(self): + """ + analyze_document() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/analyze') + mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"anyKey": "anyValue"}}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + file = io.BytesIO(b'This is a mock file.').getvalue() + filename = 'testString' + file_content_type = 'application/json' + metadata = 'testString' + + # Invoke method + response = _service.analyze_document( + project_id, + collection_id, + file=file, + filename=filename, + file_content_type=file_content_type, + metadata=metadata, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_analyze_document_all_params_with_retries(self): + # Enable retries and run test_analyze_document_all_params. + _service.enable_retries() + self.test_analyze_document_all_params() + + # Disable retries and run test_analyze_document_all_params. 
+ _service.disable_retries() + self.test_analyze_document_all_params() + + @responses.activate + def test_analyze_document_required_params(self): + """ + test_analyze_document_required_params() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/analyze') + mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"anyKey": "anyValue"}}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Invoke method + response = _service.analyze_document( + project_id, + collection_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_analyze_document_required_params_with_retries(self): + # Enable retries and run test_analyze_document_required_params. + _service.enable_retries() + self.test_analyze_document_required_params() + + # Disable retries and run test_analyze_document_required_params. + _service.disable_retries() + self.test_analyze_document_required_params() + + @responses.activate + def test_analyze_document_value_error(self): + """ + test_analyze_document_value_error() + """ + # Set up mock + url = preprocess_url('/v2/projects/testString/collections/testString/analyze') + mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"anyKey": "anyValue"}}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + project_id = 'testString' + collection_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "project_id": project_id, + "collection_id": collection_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.analyze_document(**req_copy) + + def test_analyze_document_value_error_with_retries(self): + # Enable retries and run test_analyze_document_value_error. + _service.enable_retries() + self.test_analyze_document_value_error() + + # Disable retries and run test_analyze_document_value_error. 
+ _service.disable_retries() + self.test_analyze_document_value_error() + + +# endregion +############################################################################## +# End of Service: Analyze +############################################################################## + +############################################################################## +# Start of Service: UserData +############################################################################## +# region + + +class TestDeleteUserData: + """ + Test Class for delete_user_data + """ + + @responses.activate + def test_delete_user_data_all_params(self): + """ + delete_user_data() + """ + # Set up mock + url = preprocess_url('/v2/user_data') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customer_id = 'testString' + + # Invoke method + response = _service.delete_user_data( + customer_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'customer_id={}'.format(customer_id) in query_string + + def test_delete_user_data_all_params_with_retries(self): + # Enable retries and run test_delete_user_data_all_params. + _service.enable_retries() + self.test_delete_user_data_all_params() + + # Disable retries and run test_delete_user_data_all_params. + _service.disable_retries() + self.test_delete_user_data_all_params() + + @responses.activate + def test_delete_user_data_value_error(self): + """ + test_delete_user_data_value_error() + """ + # Set up mock + url = preprocess_url('/v2/user_data') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customer_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customer_id": customer_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_user_data(**req_copy) + + def test_delete_user_data_value_error_with_retries(self): + # Enable retries and run test_delete_user_data_value_error. + _service.enable_retries() + self.test_delete_user_data_value_error() + + # Disable retries and run test_delete_user_data_value_error. + _service.disable_retries() + self.test_delete_user_data_value_error() + + +# endregion +############################################################################## +# End of Service: UserData +############################################################################## + + +############################################################################## +# Start of Model Tests +############################################################################## +# region + + +class TestModel_AnalyzedDocument: + """ + Test Class for AnalyzedDocument + """ + + def test_analyzed_document_serialization(self): + """ + Test serialization/deserialization for AnalyzedDocument + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + notice_model = {} # Notice + + analyzed_result_model = {} # AnalyzedResult + analyzed_result_model['metadata'] = {'anyKey': 'anyValue'} + analyzed_result_model['foo'] = 'testString' + + # Construct a json representation of a AnalyzedDocument model + analyzed_document_model_json = {} + analyzed_document_model_json['notices'] = [notice_model] + analyzed_document_model_json['result'] = analyzed_result_model + + # Construct a model instance of AnalyzedDocument by calling from_dict on the json representation + analyzed_document_model = AnalyzedDocument.from_dict(analyzed_document_model_json) + assert analyzed_document_model != False + + # Construct a model instance of AnalyzedDocument by calling from_dict on the json representation + analyzed_document_model_dict = AnalyzedDocument.from_dict(analyzed_document_model_json).__dict__ + analyzed_document_model2 = AnalyzedDocument(**analyzed_document_model_dict) + + # Verify the model instances are equivalent + assert analyzed_document_model == analyzed_document_model2 + + # Convert model instance back to dict and verify no loss of data + analyzed_document_model_json2 = analyzed_document_model.to_dict() + assert analyzed_document_model_json2 == analyzed_document_model_json + + +class TestModel_AnalyzedResult: + """ + Test Class for AnalyzedResult + """ + + def test_analyzed_result_serialization(self): + """ + Test serialization/deserialization for AnalyzedResult + """ + + # Construct a json representation of a AnalyzedResult model + analyzed_result_model_json = {} + analyzed_result_model_json['metadata'] = {'anyKey': 'anyValue'} + analyzed_result_model_json['foo'] = 'testString' + + # Construct a model instance of AnalyzedResult by calling from_dict on the json representation + analyzed_result_model = AnalyzedResult.from_dict(analyzed_result_model_json) + assert analyzed_result_model != False + + # Construct a model instance of AnalyzedResult by calling from_dict on the json representation + analyzed_result_model_dict = AnalyzedResult.from_dict(analyzed_result_model_json).__dict__ + analyzed_result_model2 = AnalyzedResult(**analyzed_result_model_dict) + + # Verify the model instances are equivalent + assert analyzed_result_model == analyzed_result_model2 + + # Convert model instance back to dict and verify no loss of data + analyzed_result_model_json2 = analyzed_result_model.to_dict() + assert analyzed_result_model_json2 == analyzed_result_model_json + + # Test get_properties and set_properties methods. 
+ analyzed_result_model.set_properties({}) + actual_dict = analyzed_result_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + analyzed_result_model.set_properties(expected_dict) + actual_dict = analyzed_result_model.get_properties() + assert actual_dict.keys() == expected_dict.keys() + + +class TestModel_BatchDetails: + """ + Test Class for BatchDetails + """ + + def test_batch_details_serialization(self): + """ + Test serialization/deserialization for BatchDetails + """ + + # Construct a json representation of a BatchDetails model + batch_details_model_json = {} + batch_details_model_json['enrichment_id'] = 'testString' + + # Construct a model instance of BatchDetails by calling from_dict on the json representation + batch_details_model = BatchDetails.from_dict(batch_details_model_json) + assert batch_details_model != False + + # Construct a model instance of BatchDetails by calling from_dict on the json representation + batch_details_model_dict = BatchDetails.from_dict(batch_details_model_json).__dict__ + batch_details_model2 = BatchDetails(**batch_details_model_dict) + + # Verify the model instances are equivalent + assert batch_details_model == batch_details_model2 + + # Convert model instance back to dict and verify no loss of data + batch_details_model_json2 = batch_details_model.to_dict() + assert batch_details_model_json2 == batch_details_model_json + + +class TestModel_ClassifierFederatedModel: + """ + Test Class for ClassifierFederatedModel + """ + + def test_classifier_federated_model_serialization(self): + """ + Test serialization/deserialization for ClassifierFederatedModel + """ + + # Construct a json representation of a ClassifierFederatedModel model + classifier_federated_model_model_json = {} + classifier_federated_model_model_json['field'] = 'testString' + + # Construct a model instance of ClassifierFederatedModel by calling from_dict on the json representation + classifier_federated_model_model = ClassifierFederatedModel.from_dict(classifier_federated_model_model_json) + assert classifier_federated_model_model != False + + # Construct a model instance of ClassifierFederatedModel by calling from_dict on the json representation + classifier_federated_model_model_dict = ClassifierFederatedModel.from_dict(classifier_federated_model_model_json).__dict__ + classifier_federated_model_model2 = ClassifierFederatedModel(**classifier_federated_model_model_dict) + + # Verify the model instances are equivalent + assert classifier_federated_model_model == classifier_federated_model_model2 + + # Convert model instance back to dict and verify no loss of data + classifier_federated_model_model_json2 = classifier_federated_model_model.to_dict() + assert classifier_federated_model_model_json2 == classifier_federated_model_model_json + + +class TestModel_ClassifierModelEvaluation: + """ + Test Class for ClassifierModelEvaluation + """ + + def test_classifier_model_evaluation_serialization(self): + """ + Test serialization/deserialization for ClassifierModelEvaluation + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + model_evaluation_micro_average_model = {} # ModelEvaluationMicroAverage + model_evaluation_micro_average_model['precision'] = 0 + model_evaluation_micro_average_model['recall'] = 0 + model_evaluation_micro_average_model['f1'] = 0 + + model_evaluation_macro_average_model = {} # ModelEvaluationMacroAverage + model_evaluation_macro_average_model['precision'] = 0 + model_evaluation_macro_average_model['recall'] = 0 + model_evaluation_macro_average_model['f1'] = 0 + + per_class_model_evaluation_model = {} # PerClassModelEvaluation + per_class_model_evaluation_model['name'] = 'testString' + per_class_model_evaluation_model['precision'] = 0 + per_class_model_evaluation_model['recall'] = 0 + per_class_model_evaluation_model['f1'] = 0 + + # Construct a json representation of a ClassifierModelEvaluation model + classifier_model_evaluation_model_json = {} + classifier_model_evaluation_model_json['micro_average'] = model_evaluation_micro_average_model + classifier_model_evaluation_model_json['macro_average'] = model_evaluation_macro_average_model + classifier_model_evaluation_model_json['per_class'] = [per_class_model_evaluation_model] + + # Construct a model instance of ClassifierModelEvaluation by calling from_dict on the json representation + classifier_model_evaluation_model = ClassifierModelEvaluation.from_dict(classifier_model_evaluation_model_json) + assert classifier_model_evaluation_model != False + + # Construct a model instance of ClassifierModelEvaluation by calling from_dict on the json representation + classifier_model_evaluation_model_dict = ClassifierModelEvaluation.from_dict(classifier_model_evaluation_model_json).__dict__ + classifier_model_evaluation_model2 = ClassifierModelEvaluation(**classifier_model_evaluation_model_dict) + + # Verify the model instances are equivalent + assert classifier_model_evaluation_model == classifier_model_evaluation_model2 + + # Convert model instance back to dict and verify no loss of data + classifier_model_evaluation_model_json2 = classifier_model_evaluation_model.to_dict() + assert classifier_model_evaluation_model_json2 == classifier_model_evaluation_model_json + + +class TestModel_Collection: + """ + Test Class for Collection + """ + + def test_collection_serialization(self): + """ + Test serialization/deserialization for Collection + """ + + # Construct a json representation of a Collection model + collection_model_json = {} + collection_model_json['name'] = 'testString' + + # Construct a model instance of Collection by calling from_dict on the json representation + collection_model = Collection.from_dict(collection_model_json) + assert collection_model != False + + # Construct a model instance of Collection by calling from_dict on the json representation + collection_model_dict = Collection.from_dict(collection_model_json).__dict__ + collection_model2 = Collection(**collection_model_dict) + + # Verify the model instances are equivalent + assert collection_model == collection_model2 + + # Convert model instance back to dict and verify no loss of data + collection_model_json2 = collection_model.to_dict() + assert collection_model_json2 == collection_model_json + + +class TestModel_CollectionDetails: + """ + Test Class for CollectionDetails + """ + + def test_collection_details_serialization(self): + """ + Test serialization/deserialization for CollectionDetails + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + collection_enrichment_model = {} # CollectionEnrichment + collection_enrichment_model['enrichment_id'] = 'testString' + collection_enrichment_model['fields'] = ['testString'] + + # Construct a json representation of a CollectionDetails model + collection_details_model_json = {} + collection_details_model_json['name'] = 'testString' + collection_details_model_json['description'] = 'testString' + collection_details_model_json['language'] = 'en' + collection_details_model_json['ocr_enabled'] = False + collection_details_model_json['enrichments'] = [collection_enrichment_model] + + # Construct a model instance of CollectionDetails by calling from_dict on the json representation + collection_details_model = CollectionDetails.from_dict(collection_details_model_json) + assert collection_details_model != False + + # Construct a model instance of CollectionDetails by calling from_dict on the json representation + collection_details_model_dict = CollectionDetails.from_dict(collection_details_model_json).__dict__ + collection_details_model2 = CollectionDetails(**collection_details_model_dict) + + # Verify the model instances are equivalent + assert collection_details_model == collection_details_model2 + + # Convert model instance back to dict and verify no loss of data + collection_details_model_json2 = collection_details_model.to_dict() + assert collection_details_model_json2 == collection_details_model_json + + +class TestModel_CollectionDetailsSmartDocumentUnderstanding: + """ + Test Class for CollectionDetailsSmartDocumentUnderstanding + """ + + def test_collection_details_smart_document_understanding_serialization(self): + """ + Test serialization/deserialization for CollectionDetailsSmartDocumentUnderstanding + """ + + # Construct a json representation of a CollectionDetailsSmartDocumentUnderstanding model + collection_details_smart_document_understanding_model_json = {} + collection_details_smart_document_understanding_model_json['enabled'] = True + collection_details_smart_document_understanding_model_json['model'] = 'custom' + + # Construct a model instance of CollectionDetailsSmartDocumentUnderstanding by calling from_dict on the json representation + collection_details_smart_document_understanding_model = CollectionDetailsSmartDocumentUnderstanding.from_dict(collection_details_smart_document_understanding_model_json) + assert collection_details_smart_document_understanding_model != False + + # Construct a model instance of CollectionDetailsSmartDocumentUnderstanding by calling from_dict on the json representation + collection_details_smart_document_understanding_model_dict = CollectionDetailsSmartDocumentUnderstanding.from_dict(collection_details_smart_document_understanding_model_json).__dict__ + collection_details_smart_document_understanding_model2 = CollectionDetailsSmartDocumentUnderstanding(**collection_details_smart_document_understanding_model_dict) + + # Verify the model instances are equivalent + assert collection_details_smart_document_understanding_model == collection_details_smart_document_understanding_model2 + + # Convert model instance back to dict and verify no loss of data + collection_details_smart_document_understanding_model_json2 = collection_details_smart_document_understanding_model.to_dict() + assert collection_details_smart_document_understanding_model_json2 == collection_details_smart_document_understanding_model_json + + +class TestModel_CollectionEnrichment: + """ + Test Class for CollectionEnrichment + """ + + def 
test_collection_enrichment_serialization(self): + """ + Test serialization/deserialization for CollectionEnrichment + """ + + # Construct a json representation of a CollectionEnrichment model + collection_enrichment_model_json = {} + collection_enrichment_model_json['enrichment_id'] = 'testString' + collection_enrichment_model_json['fields'] = ['testString'] + + # Construct a model instance of CollectionEnrichment by calling from_dict on the json representation + collection_enrichment_model = CollectionEnrichment.from_dict(collection_enrichment_model_json) + assert collection_enrichment_model != False + + # Construct a model instance of CollectionEnrichment by calling from_dict on the json representation + collection_enrichment_model_dict = CollectionEnrichment.from_dict(collection_enrichment_model_json).__dict__ + collection_enrichment_model2 = CollectionEnrichment(**collection_enrichment_model_dict) + + # Verify the model instances are equivalent + assert collection_enrichment_model == collection_enrichment_model2 + + # Convert model instance back to dict and verify no loss of data + collection_enrichment_model_json2 = collection_enrichment_model.to_dict() + assert collection_enrichment_model_json2 == collection_enrichment_model_json + + +class TestModel_Completions: + """ + Test Class for Completions + """ + + def test_completions_serialization(self): + """ + Test serialization/deserialization for Completions + """ + + # Construct a json representation of a Completions model + completions_model_json = {} + completions_model_json['completions'] = ['testString'] + + # Construct a model instance of Completions by calling from_dict on the json representation + completions_model = Completions.from_dict(completions_model_json) + assert completions_model != False + + # Construct a model instance of Completions by calling from_dict on the json representation + completions_model_dict = Completions.from_dict(completions_model_json).__dict__ + completions_model2 = Completions(**completions_model_dict) + + # Verify the model instances are equivalent + assert completions_model == completions_model2 + + # Convert model instance back to dict and verify no loss of data + completions_model_json2 = completions_model.to_dict() + assert completions_model_json2 == completions_model_json + + +class TestModel_ComponentSettingsAggregation: + """ + Test Class for ComponentSettingsAggregation + """ + + def test_component_settings_aggregation_serialization(self): + """ + Test serialization/deserialization for ComponentSettingsAggregation + """ + + # Construct a json representation of a ComponentSettingsAggregation model + component_settings_aggregation_model_json = {} + component_settings_aggregation_model_json['name'] = 'testString' + component_settings_aggregation_model_json['label'] = 'testString' + component_settings_aggregation_model_json['multiple_selections_allowed'] = True + component_settings_aggregation_model_json['visualization_type'] = 'auto' + + # Construct a model instance of ComponentSettingsAggregation by calling from_dict on the json representation + component_settings_aggregation_model = ComponentSettingsAggregation.from_dict(component_settings_aggregation_model_json) + assert component_settings_aggregation_model != False + + # Construct a model instance of ComponentSettingsAggregation by calling from_dict on the json representation + component_settings_aggregation_model_dict = ComponentSettingsAggregation.from_dict(component_settings_aggregation_model_json).__dict__ + 
component_settings_aggregation_model2 = ComponentSettingsAggregation(**component_settings_aggregation_model_dict) + + # Verify the model instances are equivalent + assert component_settings_aggregation_model == component_settings_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + component_settings_aggregation_model_json2 = component_settings_aggregation_model.to_dict() + assert component_settings_aggregation_model_json2 == component_settings_aggregation_model_json + + +class TestModel_ComponentSettingsFieldsShown: + """ + Test Class for ComponentSettingsFieldsShown + """ + + def test_component_settings_fields_shown_serialization(self): + """ + Test serialization/deserialization for ComponentSettingsFieldsShown + """ + + # Construct dict forms of any model objects needed in order to build this model. + + component_settings_fields_shown_body_model = {} # ComponentSettingsFieldsShownBody + component_settings_fields_shown_body_model['use_passage'] = True + component_settings_fields_shown_body_model['field'] = 'testString' + + component_settings_fields_shown_title_model = {} # ComponentSettingsFieldsShownTitle + component_settings_fields_shown_title_model['field'] = 'testString' + + # Construct a json representation of a ComponentSettingsFieldsShown model + component_settings_fields_shown_model_json = {} + component_settings_fields_shown_model_json['body'] = component_settings_fields_shown_body_model + component_settings_fields_shown_model_json['title'] = component_settings_fields_shown_title_model + + # Construct a model instance of ComponentSettingsFieldsShown by calling from_dict on the json representation + component_settings_fields_shown_model = ComponentSettingsFieldsShown.from_dict(component_settings_fields_shown_model_json) + assert component_settings_fields_shown_model != False + + # Construct a model instance of ComponentSettingsFieldsShown by calling from_dict on the json representation + component_settings_fields_shown_model_dict = ComponentSettingsFieldsShown.from_dict(component_settings_fields_shown_model_json).__dict__ + component_settings_fields_shown_model2 = ComponentSettingsFieldsShown(**component_settings_fields_shown_model_dict) + + # Verify the model instances are equivalent + assert component_settings_fields_shown_model == component_settings_fields_shown_model2 + + # Convert model instance back to dict and verify no loss of data + component_settings_fields_shown_model_json2 = component_settings_fields_shown_model.to_dict() + assert component_settings_fields_shown_model_json2 == component_settings_fields_shown_model_json + + +class TestModel_ComponentSettingsFieldsShownBody: + """ + Test Class for ComponentSettingsFieldsShownBody + """ + + def test_component_settings_fields_shown_body_serialization(self): + """ + Test serialization/deserialization for ComponentSettingsFieldsShownBody + """ + + # Construct a json representation of a ComponentSettingsFieldsShownBody model + component_settings_fields_shown_body_model_json = {} + component_settings_fields_shown_body_model_json['use_passage'] = True + component_settings_fields_shown_body_model_json['field'] = 'testString' + + # Construct a model instance of ComponentSettingsFieldsShownBody by calling from_dict on the json representation + component_settings_fields_shown_body_model = ComponentSettingsFieldsShownBody.from_dict(component_settings_fields_shown_body_model_json) + assert component_settings_fields_shown_body_model != False + + # Construct a model instance of 
ComponentSettingsFieldsShownBody by calling from_dict on the json representation + component_settings_fields_shown_body_model_dict = ComponentSettingsFieldsShownBody.from_dict(component_settings_fields_shown_body_model_json).__dict__ + component_settings_fields_shown_body_model2 = ComponentSettingsFieldsShownBody(**component_settings_fields_shown_body_model_dict) + + # Verify the model instances are equivalent + assert component_settings_fields_shown_body_model == component_settings_fields_shown_body_model2 + + # Convert model instance back to dict and verify no loss of data + component_settings_fields_shown_body_model_json2 = component_settings_fields_shown_body_model.to_dict() + assert component_settings_fields_shown_body_model_json2 == component_settings_fields_shown_body_model_json + + +class TestModel_ComponentSettingsFieldsShownTitle: + """ + Test Class for ComponentSettingsFieldsShownTitle + """ + + def test_component_settings_fields_shown_title_serialization(self): + """ + Test serialization/deserialization for ComponentSettingsFieldsShownTitle + """ + + # Construct a json representation of a ComponentSettingsFieldsShownTitle model + component_settings_fields_shown_title_model_json = {} + component_settings_fields_shown_title_model_json['field'] = 'testString' + + # Construct a model instance of ComponentSettingsFieldsShownTitle by calling from_dict on the json representation + component_settings_fields_shown_title_model = ComponentSettingsFieldsShownTitle.from_dict(component_settings_fields_shown_title_model_json) + assert component_settings_fields_shown_title_model != False + + # Construct a model instance of ComponentSettingsFieldsShownTitle by calling from_dict on the json representation + component_settings_fields_shown_title_model_dict = ComponentSettingsFieldsShownTitle.from_dict(component_settings_fields_shown_title_model_json).__dict__ + component_settings_fields_shown_title_model2 = ComponentSettingsFieldsShownTitle(**component_settings_fields_shown_title_model_dict) + + # Verify the model instances are equivalent + assert component_settings_fields_shown_title_model == component_settings_fields_shown_title_model2 + + # Convert model instance back to dict and verify no loss of data + component_settings_fields_shown_title_model_json2 = component_settings_fields_shown_title_model.to_dict() + assert component_settings_fields_shown_title_model_json2 == component_settings_fields_shown_title_model_json + + +class TestModel_ComponentSettingsResponse: + """ + Test Class for ComponentSettingsResponse + """ + + def test_component_settings_response_serialization(self): + """ + Test serialization/deserialization for ComponentSettingsResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + component_settings_fields_shown_body_model = {} # ComponentSettingsFieldsShownBody + component_settings_fields_shown_body_model['use_passage'] = True + component_settings_fields_shown_body_model['field'] = 'testString' + + component_settings_fields_shown_title_model = {} # ComponentSettingsFieldsShownTitle + component_settings_fields_shown_title_model['field'] = 'testString' + + component_settings_fields_shown_model = {} # ComponentSettingsFieldsShown + component_settings_fields_shown_model['body'] = component_settings_fields_shown_body_model + component_settings_fields_shown_model['title'] = component_settings_fields_shown_title_model + + component_settings_aggregation_model = {} # ComponentSettingsAggregation + component_settings_aggregation_model['name'] = 'testString' + component_settings_aggregation_model['label'] = 'testString' + component_settings_aggregation_model['multiple_selections_allowed'] = True + component_settings_aggregation_model['visualization_type'] = 'auto' + + # Construct a json representation of a ComponentSettingsResponse model + component_settings_response_model_json = {} + component_settings_response_model_json['fields_shown'] = component_settings_fields_shown_model + component_settings_response_model_json['autocomplete'] = True + component_settings_response_model_json['structured_search'] = True + component_settings_response_model_json['results_per_page'] = 38 + component_settings_response_model_json['aggregations'] = [component_settings_aggregation_model] + + # Construct a model instance of ComponentSettingsResponse by calling from_dict on the json representation + component_settings_response_model = ComponentSettingsResponse.from_dict(component_settings_response_model_json) + assert component_settings_response_model != False + + # Construct a model instance of ComponentSettingsResponse by calling from_dict on the json representation + component_settings_response_model_dict = ComponentSettingsResponse.from_dict(component_settings_response_model_json).__dict__ + component_settings_response_model2 = ComponentSettingsResponse(**component_settings_response_model_dict) + + # Verify the model instances are equivalent + assert component_settings_response_model == component_settings_response_model2 + + # Convert model instance back to dict and verify no loss of data + component_settings_response_model_json2 = component_settings_response_model.to_dict() + assert component_settings_response_model_json2 == component_settings_response_model_json + + +class TestModel_CreateDocumentClassifier: + """ + Test Class for CreateDocumentClassifier + """ + + def test_create_document_classifier_serialization(self): + """ + Test serialization/deserialization for CreateDocumentClassifier + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + document_classifier_enrichment_model = {} # DocumentClassifierEnrichment + document_classifier_enrichment_model['enrichment_id'] = 'testString' + document_classifier_enrichment_model['fields'] = ['testString'] + + classifier_federated_model_model = {} # ClassifierFederatedModel + classifier_federated_model_model['field'] = 'testString' + + # Construct a json representation of a CreateDocumentClassifier model + create_document_classifier_model_json = {} + create_document_classifier_model_json['name'] = 'testString' + create_document_classifier_model_json['description'] = 'testString' + create_document_classifier_model_json['language'] = 'en' + create_document_classifier_model_json['answer_field'] = 'testString' + create_document_classifier_model_json['enrichments'] = [document_classifier_enrichment_model] + create_document_classifier_model_json['federated_classification'] = classifier_federated_model_model + + # Construct a model instance of CreateDocumentClassifier by calling from_dict on the json representation + create_document_classifier_model = CreateDocumentClassifier.from_dict(create_document_classifier_model_json) + assert create_document_classifier_model != False + + # Construct a model instance of CreateDocumentClassifier by calling from_dict on the json representation + create_document_classifier_model_dict = CreateDocumentClassifier.from_dict(create_document_classifier_model_json).__dict__ + create_document_classifier_model2 = CreateDocumentClassifier(**create_document_classifier_model_dict) + + # Verify the model instances are equivalent + assert create_document_classifier_model == create_document_classifier_model2 + + # Convert model instance back to dict and verify no loss of data + create_document_classifier_model_json2 = create_document_classifier_model.to_dict() + assert create_document_classifier_model_json2 == create_document_classifier_model_json + + +class TestModel_CreateEnrichment: + """ + Test Class for CreateEnrichment + """ + + def test_create_enrichment_serialization(self): + """ + Test serialization/deserialization for CreateEnrichment + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + webhook_header_model = {} # WebhookHeader + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + enrichment_options_model = {} # EnrichmentOptions + enrichment_options_model['languages'] = ['testString'] + enrichment_options_model['entity_type'] = 'testString' + enrichment_options_model['regular_expression'] = 'testString' + enrichment_options_model['result_field'] = 'testString' + enrichment_options_model['classifier_id'] = 'testString' + enrichment_options_model['model_id'] = 'testString' + enrichment_options_model['confidence_threshold'] = 0 + enrichment_options_model['top_k'] = 0 + enrichment_options_model['url'] = 'testString' + enrichment_options_model['version'] = '2023-03-31' + enrichment_options_model['secret'] = 'testString' + enrichment_options_model['headers'] = webhook_header_model + enrichment_options_model['location_encoding'] = '`utf-16`' + + # Construct a json representation of a CreateEnrichment model + create_enrichment_model_json = {} + create_enrichment_model_json['name'] = 'testString' + create_enrichment_model_json['description'] = 'testString' + create_enrichment_model_json['type'] = 'classifier' + create_enrichment_model_json['options'] = enrichment_options_model + + # Construct a model instance of CreateEnrichment by calling from_dict on the json representation + create_enrichment_model = CreateEnrichment.from_dict(create_enrichment_model_json) + assert create_enrichment_model != False + + # Construct a model instance of CreateEnrichment by calling from_dict on the json representation + create_enrichment_model_dict = CreateEnrichment.from_dict(create_enrichment_model_json).__dict__ + create_enrichment_model2 = CreateEnrichment(**create_enrichment_model_dict) + + # Verify the model instances are equivalent + assert create_enrichment_model == create_enrichment_model2 + + # Convert model instance back to dict and verify no loss of data + create_enrichment_model_json2 = create_enrichment_model.to_dict() + assert create_enrichment_model_json2 == create_enrichment_model_json + + +class TestModel_DefaultQueryParams: + """ + Test Class for DefaultQueryParams + """ + + def test_default_query_params_serialization(self): + """ + Test serialization/deserialization for DefaultQueryParams + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + default_query_params_passages_model = {} # DefaultQueryParamsPassages + default_query_params_passages_model['enabled'] = True + default_query_params_passages_model['count'] = 38 + default_query_params_passages_model['fields'] = ['testString'] + default_query_params_passages_model['characters'] = 38 + default_query_params_passages_model['per_document'] = True + default_query_params_passages_model['max_per_document'] = 38 + + default_query_params_table_results_model = {} # DefaultQueryParamsTableResults + default_query_params_table_results_model['enabled'] = True + default_query_params_table_results_model['count'] = 38 + default_query_params_table_results_model['per_document'] = 0 + + default_query_params_suggested_refinements_model = {} # DefaultQueryParamsSuggestedRefinements + default_query_params_suggested_refinements_model['enabled'] = True + default_query_params_suggested_refinements_model['count'] = 38 + + # Construct a json representation of a DefaultQueryParams model + default_query_params_model_json = {} + default_query_params_model_json['collection_ids'] = ['testString'] + default_query_params_model_json['passages'] = default_query_params_passages_model + default_query_params_model_json['table_results'] = default_query_params_table_results_model + default_query_params_model_json['aggregation'] = 'testString' + default_query_params_model_json['suggested_refinements'] = default_query_params_suggested_refinements_model + default_query_params_model_json['spelling_suggestions'] = True + default_query_params_model_json['highlight'] = True + default_query_params_model_json['count'] = 38 + default_query_params_model_json['sort'] = 'testString' + default_query_params_model_json['return'] = ['testString'] + + # Construct a model instance of DefaultQueryParams by calling from_dict on the json representation + default_query_params_model = DefaultQueryParams.from_dict(default_query_params_model_json) + assert default_query_params_model != False + + # Construct a model instance of DefaultQueryParams by calling from_dict on the json representation + default_query_params_model_dict = DefaultQueryParams.from_dict(default_query_params_model_json).__dict__ + default_query_params_model2 = DefaultQueryParams(**default_query_params_model_dict) + + # Verify the model instances are equivalent + assert default_query_params_model == default_query_params_model2 + + # Convert model instance back to dict and verify no loss of data + default_query_params_model_json2 = default_query_params_model.to_dict() + assert default_query_params_model_json2 == default_query_params_model_json + + +class TestModel_DefaultQueryParamsPassages: + """ + Test Class for DefaultQueryParamsPassages + """ + + def test_default_query_params_passages_serialization(self): + """ + Test serialization/deserialization for DefaultQueryParamsPassages + """ + + # Construct a json representation of a DefaultQueryParamsPassages model + default_query_params_passages_model_json = {} + default_query_params_passages_model_json['enabled'] = True + default_query_params_passages_model_json['count'] = 38 + default_query_params_passages_model_json['fields'] = ['testString'] + default_query_params_passages_model_json['characters'] = 38 + default_query_params_passages_model_json['per_document'] = True + default_query_params_passages_model_json['max_per_document'] = 38 + + # Construct a model instance of DefaultQueryParamsPassages by calling from_dict on the json representation + default_query_params_passages_model = 
DefaultQueryParamsPassages.from_dict(default_query_params_passages_model_json) + assert default_query_params_passages_model != False + + # Construct a model instance of DefaultQueryParamsPassages by calling from_dict on the json representation + default_query_params_passages_model_dict = DefaultQueryParamsPassages.from_dict(default_query_params_passages_model_json).__dict__ + default_query_params_passages_model2 = DefaultQueryParamsPassages(**default_query_params_passages_model_dict) + + # Verify the model instances are equivalent + assert default_query_params_passages_model == default_query_params_passages_model2 + + # Convert model instance back to dict and verify no loss of data + default_query_params_passages_model_json2 = default_query_params_passages_model.to_dict() + assert default_query_params_passages_model_json2 == default_query_params_passages_model_json + + +class TestModel_DefaultQueryParamsSuggestedRefinements: + """ + Test Class for DefaultQueryParamsSuggestedRefinements + """ + + def test_default_query_params_suggested_refinements_serialization(self): + """ + Test serialization/deserialization for DefaultQueryParamsSuggestedRefinements + """ + + # Construct a json representation of a DefaultQueryParamsSuggestedRefinements model + default_query_params_suggested_refinements_model_json = {} + default_query_params_suggested_refinements_model_json['enabled'] = True + default_query_params_suggested_refinements_model_json['count'] = 38 + + # Construct a model instance of DefaultQueryParamsSuggestedRefinements by calling from_dict on the json representation + default_query_params_suggested_refinements_model = DefaultQueryParamsSuggestedRefinements.from_dict(default_query_params_suggested_refinements_model_json) + assert default_query_params_suggested_refinements_model != False + + # Construct a model instance of DefaultQueryParamsSuggestedRefinements by calling from_dict on the json representation + default_query_params_suggested_refinements_model_dict = DefaultQueryParamsSuggestedRefinements.from_dict(default_query_params_suggested_refinements_model_json).__dict__ + default_query_params_suggested_refinements_model2 = DefaultQueryParamsSuggestedRefinements(**default_query_params_suggested_refinements_model_dict) + + # Verify the model instances are equivalent + assert default_query_params_suggested_refinements_model == default_query_params_suggested_refinements_model2 + + # Convert model instance back to dict and verify no loss of data + default_query_params_suggested_refinements_model_json2 = default_query_params_suggested_refinements_model.to_dict() + assert default_query_params_suggested_refinements_model_json2 == default_query_params_suggested_refinements_model_json + + +class TestModel_DefaultQueryParamsTableResults: + """ + Test Class for DefaultQueryParamsTableResults + """ + + def test_default_query_params_table_results_serialization(self): + """ + Test serialization/deserialization for DefaultQueryParamsTableResults + """ + + # Construct a json representation of a DefaultQueryParamsTableResults model + default_query_params_table_results_model_json = {} + default_query_params_table_results_model_json['enabled'] = True + default_query_params_table_results_model_json['count'] = 38 + default_query_params_table_results_model_json['per_document'] = 0 + + # Construct a model instance of DefaultQueryParamsTableResults by calling from_dict on the json representation + default_query_params_table_results_model = 
DefaultQueryParamsTableResults.from_dict(default_query_params_table_results_model_json) + assert default_query_params_table_results_model != False + + # Construct a model instance of DefaultQueryParamsTableResults by calling from_dict on the json representation + default_query_params_table_results_model_dict = DefaultQueryParamsTableResults.from_dict(default_query_params_table_results_model_json).__dict__ + default_query_params_table_results_model2 = DefaultQueryParamsTableResults(**default_query_params_table_results_model_dict) + + # Verify the model instances are equivalent + assert default_query_params_table_results_model == default_query_params_table_results_model2 + + # Convert model instance back to dict and verify no loss of data + default_query_params_table_results_model_json2 = default_query_params_table_results_model.to_dict() + assert default_query_params_table_results_model_json2 == default_query_params_table_results_model_json + + +class TestModel_DeleteDocumentResponse: + """ + Test Class for DeleteDocumentResponse + """ + + def test_delete_document_response_serialization(self): + """ + Test serialization/deserialization for DeleteDocumentResponse + """ + + # Construct a json representation of a DeleteDocumentResponse model + delete_document_response_model_json = {} + delete_document_response_model_json['document_id'] = 'testString' + delete_document_response_model_json['status'] = 'deleted' + + # Construct a model instance of DeleteDocumentResponse by calling from_dict on the json representation + delete_document_response_model = DeleteDocumentResponse.from_dict(delete_document_response_model_json) + assert delete_document_response_model != False + + # Construct a model instance of DeleteDocumentResponse by calling from_dict on the json representation + delete_document_response_model_dict = DeleteDocumentResponse.from_dict(delete_document_response_model_json).__dict__ + delete_document_response_model2 = DeleteDocumentResponse(**delete_document_response_model_dict) + + # Verify the model instances are equivalent + assert delete_document_response_model == delete_document_response_model2 + + # Convert model instance back to dict and verify no loss of data + delete_document_response_model_json2 = delete_document_response_model.to_dict() + assert delete_document_response_model_json2 == delete_document_response_model_json + + +class TestModel_DocumentAccepted: + """ + Test Class for DocumentAccepted + """ + + def test_document_accepted_serialization(self): + """ + Test serialization/deserialization for DocumentAccepted + """ + + # Construct a json representation of a DocumentAccepted model + document_accepted_model_json = {} + document_accepted_model_json['document_id'] = 'testString' + document_accepted_model_json['status'] = 'processing' + + # Construct a model instance of DocumentAccepted by calling from_dict on the json representation + document_accepted_model = DocumentAccepted.from_dict(document_accepted_model_json) + assert document_accepted_model != False + + # Construct a model instance of DocumentAccepted by calling from_dict on the json representation + document_accepted_model_dict = DocumentAccepted.from_dict(document_accepted_model_json).__dict__ + document_accepted_model2 = DocumentAccepted(**document_accepted_model_dict) + + # Verify the model instances are equivalent + assert document_accepted_model == document_accepted_model2 + + # Convert model instance back to dict and verify no loss of data + document_accepted_model_json2 = document_accepted_model.to_dict() + 
assert document_accepted_model_json2 == document_accepted_model_json + + +class TestModel_DocumentAttribute: + """ + Test Class for DocumentAttribute + """ + + def test_document_attribute_serialization(self): + """ + Test serialization/deserialization for DocumentAttribute + """ + + # Construct dict forms of any model objects needed in order to build this model. + + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + # Construct a json representation of a DocumentAttribute model + document_attribute_model_json = {} + document_attribute_model_json['type'] = 'testString' + document_attribute_model_json['text'] = 'testString' + document_attribute_model_json['location'] = table_element_location_model + + # Construct a model instance of DocumentAttribute by calling from_dict on the json representation + document_attribute_model = DocumentAttribute.from_dict(document_attribute_model_json) + assert document_attribute_model != False + + # Construct a model instance of DocumentAttribute by calling from_dict on the json representation + document_attribute_model_dict = DocumentAttribute.from_dict(document_attribute_model_json).__dict__ + document_attribute_model2 = DocumentAttribute(**document_attribute_model_dict) + + # Verify the model instances are equivalent + assert document_attribute_model == document_attribute_model2 + + # Convert model instance back to dict and verify no loss of data + document_attribute_model_json2 = document_attribute_model.to_dict() + assert document_attribute_model_json2 == document_attribute_model_json + + +class TestModel_DocumentClassifier: + """ + Test Class for DocumentClassifier + """ + + def test_document_classifier_serialization(self): + """ + Test serialization/deserialization for DocumentClassifier + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + document_classifier_enrichment_model = {} # DocumentClassifierEnrichment + document_classifier_enrichment_model['enrichment_id'] = 'testString' + document_classifier_enrichment_model['fields'] = ['testString'] + + classifier_federated_model_model = {} # ClassifierFederatedModel + classifier_federated_model_model['field'] = 'testString' + + # Construct a json representation of a DocumentClassifier model + document_classifier_model_json = {} + document_classifier_model_json['name'] = 'testString' + document_classifier_model_json['description'] = 'testString' + document_classifier_model_json['language'] = 'en' + document_classifier_model_json['enrichments'] = [document_classifier_enrichment_model] + document_classifier_model_json['recognized_fields'] = ['testString'] + document_classifier_model_json['answer_field'] = 'testString' + document_classifier_model_json['training_data_file'] = 'testString' + document_classifier_model_json['test_data_file'] = 'testString' + document_classifier_model_json['federated_classification'] = classifier_federated_model_model + + # Construct a model instance of DocumentClassifier by calling from_dict on the json representation + document_classifier_model = DocumentClassifier.from_dict(document_classifier_model_json) + assert document_classifier_model != False + + # Construct a model instance of DocumentClassifier by calling from_dict on the json representation + document_classifier_model_dict = DocumentClassifier.from_dict(document_classifier_model_json).__dict__ + document_classifier_model2 = DocumentClassifier(**document_classifier_model_dict) + + # Verify the model instances are equivalent + assert document_classifier_model == document_classifier_model2 + + # Convert model instance back to dict and verify no loss of data + document_classifier_model_json2 = document_classifier_model.to_dict() + assert document_classifier_model_json2 == document_classifier_model_json + + +class TestModel_DocumentClassifierEnrichment: + """ + Test Class for DocumentClassifierEnrichment + """ + + def test_document_classifier_enrichment_serialization(self): + """ + Test serialization/deserialization for DocumentClassifierEnrichment + """ + + # Construct a json representation of a DocumentClassifierEnrichment model + document_classifier_enrichment_model_json = {} + document_classifier_enrichment_model_json['enrichment_id'] = 'testString' + document_classifier_enrichment_model_json['fields'] = ['testString'] + + # Construct a model instance of DocumentClassifierEnrichment by calling from_dict on the json representation + document_classifier_enrichment_model = DocumentClassifierEnrichment.from_dict(document_classifier_enrichment_model_json) + assert document_classifier_enrichment_model != False + + # Construct a model instance of DocumentClassifierEnrichment by calling from_dict on the json representation + document_classifier_enrichment_model_dict = DocumentClassifierEnrichment.from_dict(document_classifier_enrichment_model_json).__dict__ + document_classifier_enrichment_model2 = DocumentClassifierEnrichment(**document_classifier_enrichment_model_dict) + + # Verify the model instances are equivalent + assert document_classifier_enrichment_model == document_classifier_enrichment_model2 + + # Convert model instance back to dict and verify no loss of data + document_classifier_enrichment_model_json2 = document_classifier_enrichment_model.to_dict() + assert document_classifier_enrichment_model_json2 == document_classifier_enrichment_model_json + + +class 
TestModel_DocumentClassifierModel: + """ + Test Class for DocumentClassifierModel + """ + + def test_document_classifier_model_serialization(self): + """ + Test serialization/deserialization for DocumentClassifierModel + """ + + # Construct dict forms of any model objects needed in order to build this model. + + model_evaluation_micro_average_model = {} # ModelEvaluationMicroAverage + model_evaluation_micro_average_model['precision'] = 0 + model_evaluation_micro_average_model['recall'] = 0 + model_evaluation_micro_average_model['f1'] = 0 + + model_evaluation_macro_average_model = {} # ModelEvaluationMacroAverage + model_evaluation_macro_average_model['precision'] = 0 + model_evaluation_macro_average_model['recall'] = 0 + model_evaluation_macro_average_model['f1'] = 0 + + per_class_model_evaluation_model = {} # PerClassModelEvaluation + per_class_model_evaluation_model['name'] = 'testString' + per_class_model_evaluation_model['precision'] = 0 + per_class_model_evaluation_model['recall'] = 0 + per_class_model_evaluation_model['f1'] = 0 + + classifier_model_evaluation_model = {} # ClassifierModelEvaluation + classifier_model_evaluation_model['micro_average'] = model_evaluation_micro_average_model + classifier_model_evaluation_model['macro_average'] = model_evaluation_macro_average_model + classifier_model_evaluation_model['per_class'] = [per_class_model_evaluation_model] + + # Construct a json representation of a DocumentClassifierModel model + document_classifier_model_model_json = {} + document_classifier_model_model_json['name'] = 'testString' + document_classifier_model_model_json['description'] = 'testString' + document_classifier_model_model_json['training_data_file'] = 'testString' + document_classifier_model_model_json['test_data_file'] = 'testString' + document_classifier_model_model_json['status'] = 'training' + document_classifier_model_model_json['evaluation'] = classifier_model_evaluation_model + document_classifier_model_model_json['enrichment_id'] = 'testString' + + # Construct a model instance of DocumentClassifierModel by calling from_dict on the json representation + document_classifier_model_model = DocumentClassifierModel.from_dict(document_classifier_model_model_json) + assert document_classifier_model_model != False + + # Construct a model instance of DocumentClassifierModel by calling from_dict on the json representation + document_classifier_model_model_dict = DocumentClassifierModel.from_dict(document_classifier_model_model_json).__dict__ + document_classifier_model_model2 = DocumentClassifierModel(**document_classifier_model_model_dict) + + # Verify the model instances are equivalent + assert document_classifier_model_model == document_classifier_model_model2 + + # Convert model instance back to dict and verify no loss of data + document_classifier_model_model_json2 = document_classifier_model_model.to_dict() + assert document_classifier_model_model_json2 == document_classifier_model_model_json + + +class TestModel_DocumentClassifierModels: + """ + Test Class for DocumentClassifierModels + """ + + def test_document_classifier_models_serialization(self): + """ + Test serialization/deserialization for DocumentClassifierModels + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + model_evaluation_micro_average_model = {} # ModelEvaluationMicroAverage + model_evaluation_micro_average_model['precision'] = 0 + model_evaluation_micro_average_model['recall'] = 0 + model_evaluation_micro_average_model['f1'] = 0 + + model_evaluation_macro_average_model = {} # ModelEvaluationMacroAverage + model_evaluation_macro_average_model['precision'] = 0 + model_evaluation_macro_average_model['recall'] = 0 + model_evaluation_macro_average_model['f1'] = 0 + + per_class_model_evaluation_model = {} # PerClassModelEvaluation + per_class_model_evaluation_model['name'] = 'testString' + per_class_model_evaluation_model['precision'] = 0 + per_class_model_evaluation_model['recall'] = 0 + per_class_model_evaluation_model['f1'] = 0 + + classifier_model_evaluation_model = {} # ClassifierModelEvaluation + classifier_model_evaluation_model['micro_average'] = model_evaluation_micro_average_model + classifier_model_evaluation_model['macro_average'] = model_evaluation_macro_average_model + classifier_model_evaluation_model['per_class'] = [per_class_model_evaluation_model] + + document_classifier_model_model = {} # DocumentClassifierModel + document_classifier_model_model['name'] = 'testString' + document_classifier_model_model['description'] = 'testString' + document_classifier_model_model['training_data_file'] = 'testString' + document_classifier_model_model['test_data_file'] = 'testString' + document_classifier_model_model['status'] = 'training' + document_classifier_model_model['evaluation'] = classifier_model_evaluation_model + document_classifier_model_model['enrichment_id'] = 'testString' + + # Construct a json representation of a DocumentClassifierModels model + document_classifier_models_model_json = {} + document_classifier_models_model_json['models'] = [document_classifier_model_model] + + # Construct a model instance of DocumentClassifierModels by calling from_dict on the json representation + document_classifier_models_model = DocumentClassifierModels.from_dict(document_classifier_models_model_json) + assert document_classifier_models_model != False + + # Construct a model instance of DocumentClassifierModels by calling from_dict on the json representation + document_classifier_models_model_dict = DocumentClassifierModels.from_dict(document_classifier_models_model_json).__dict__ + document_classifier_models_model2 = DocumentClassifierModels(**document_classifier_models_model_dict) + + # Verify the model instances are equivalent + assert document_classifier_models_model == document_classifier_models_model2 + + # Convert model instance back to dict and verify no loss of data + document_classifier_models_model_json2 = document_classifier_models_model.to_dict() + assert document_classifier_models_model_json2 == document_classifier_models_model_json + + +class TestModel_DocumentClassifiers: + """ + Test Class for DocumentClassifiers + """ + + def test_document_classifiers_serialization(self): + """ + Test serialization/deserialization for DocumentClassifiers + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + document_classifier_enrichment_model = {} # DocumentClassifierEnrichment + document_classifier_enrichment_model['enrichment_id'] = 'testString' + document_classifier_enrichment_model['fields'] = ['testString'] + + classifier_federated_model_model = {} # ClassifierFederatedModel + classifier_federated_model_model['field'] = 'testString' + + document_classifier_model = {} # DocumentClassifier + document_classifier_model['name'] = 'testString' + document_classifier_model['description'] = 'testString' + document_classifier_model['language'] = 'en' + document_classifier_model['enrichments'] = [document_classifier_enrichment_model] + document_classifier_model['recognized_fields'] = ['testString'] + document_classifier_model['answer_field'] = 'testString' + document_classifier_model['training_data_file'] = 'testString' + document_classifier_model['test_data_file'] = 'testString' + document_classifier_model['federated_classification'] = classifier_federated_model_model + + # Construct a json representation of a DocumentClassifiers model + document_classifiers_model_json = {} + document_classifiers_model_json['classifiers'] = [document_classifier_model] + + # Construct a model instance of DocumentClassifiers by calling from_dict on the json representation + document_classifiers_model = DocumentClassifiers.from_dict(document_classifiers_model_json) + assert document_classifiers_model != False + + # Construct a model instance of DocumentClassifiers by calling from_dict on the json representation + document_classifiers_model_dict = DocumentClassifiers.from_dict(document_classifiers_model_json).__dict__ + document_classifiers_model2 = DocumentClassifiers(**document_classifiers_model_dict) + + # Verify the model instances are equivalent + assert document_classifiers_model == document_classifiers_model2 + + # Convert model instance back to dict and verify no loss of data + document_classifiers_model_json2 = document_classifiers_model.to_dict() + assert document_classifiers_model_json2 == document_classifiers_model_json + + +class TestModel_DocumentDetails: + """ + Test Class for DocumentDetails + """ + + def test_document_details_serialization(self): + """ + Test serialization/deserialization for DocumentDetails + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + notice_model = {} # Notice + + document_details_children_model = {} # DocumentDetailsChildren + document_details_children_model['have_notices'] = True + document_details_children_model['count'] = 38 + + # Construct a json representation of a DocumentDetails model + document_details_model_json = {} + document_details_model_json['status'] = 'available' + document_details_model_json['notices'] = [notice_model] + document_details_model_json['children'] = document_details_children_model + document_details_model_json['filename'] = 'testString' + document_details_model_json['file_type'] = 'testString' + document_details_model_json['sha256'] = 'testString' + + # Construct a model instance of DocumentDetails by calling from_dict on the json representation + document_details_model = DocumentDetails.from_dict(document_details_model_json) + assert document_details_model != False + + # Construct a model instance of DocumentDetails by calling from_dict on the json representation + document_details_model_dict = DocumentDetails.from_dict(document_details_model_json).__dict__ + document_details_model2 = DocumentDetails(**document_details_model_dict) + + # Verify the model instances are equivalent + assert document_details_model == document_details_model2 + + # Convert model instance back to dict and verify no loss of data + document_details_model_json2 = document_details_model.to_dict() + assert document_details_model_json2 == document_details_model_json + + +class TestModel_DocumentDetailsChildren: + """ + Test Class for DocumentDetailsChildren + """ + + def test_document_details_children_serialization(self): + """ + Test serialization/deserialization for DocumentDetailsChildren + """ + + # Construct a json representation of a DocumentDetailsChildren model + document_details_children_model_json = {} + document_details_children_model_json['have_notices'] = True + document_details_children_model_json['count'] = 38 + + # Construct a model instance of DocumentDetailsChildren by calling from_dict on the json representation + document_details_children_model = DocumentDetailsChildren.from_dict(document_details_children_model_json) + assert document_details_children_model != False + + # Construct a model instance of DocumentDetailsChildren by calling from_dict on the json representation + document_details_children_model_dict = DocumentDetailsChildren.from_dict(document_details_children_model_json).__dict__ + document_details_children_model2 = DocumentDetailsChildren(**document_details_children_model_dict) + + # Verify the model instances are equivalent + assert document_details_children_model == document_details_children_model2 + + # Convert model instance back to dict and verify no loss of data + document_details_children_model_json2 = document_details_children_model.to_dict() + assert document_details_children_model_json2 == document_details_children_model_json + + +class TestModel_Enrichment: + """ + Test Class for Enrichment + """ + + def test_enrichment_serialization(self): + """ + Test serialization/deserialization for Enrichment + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + webhook_header_model = {} # WebhookHeader + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + enrichment_options_model = {} # EnrichmentOptions + enrichment_options_model['languages'] = ['testString'] + enrichment_options_model['entity_type'] = 'testString' + enrichment_options_model['regular_expression'] = 'testString' + enrichment_options_model['result_field'] = 'testString' + enrichment_options_model['classifier_id'] = 'testString' + enrichment_options_model['model_id'] = 'testString' + enrichment_options_model['confidence_threshold'] = 0 + enrichment_options_model['top_k'] = 0 + enrichment_options_model['url'] = 'testString' + enrichment_options_model['version'] = '2023-03-31' + enrichment_options_model['secret'] = 'testString' + enrichment_options_model['headers'] = webhook_header_model + enrichment_options_model['location_encoding'] = '`utf-16`' + + # Construct a json representation of a Enrichment model + enrichment_model_json = {} + enrichment_model_json['name'] = 'testString' + enrichment_model_json['description'] = 'testString' + enrichment_model_json['type'] = 'part_of_speech' + enrichment_model_json['options'] = enrichment_options_model + + # Construct a model instance of Enrichment by calling from_dict on the json representation + enrichment_model = Enrichment.from_dict(enrichment_model_json) + assert enrichment_model != False + + # Construct a model instance of Enrichment by calling from_dict on the json representation + enrichment_model_dict = Enrichment.from_dict(enrichment_model_json).__dict__ + enrichment_model2 = Enrichment(**enrichment_model_dict) + + # Verify the model instances are equivalent + assert enrichment_model == enrichment_model2 + + # Convert model instance back to dict and verify no loss of data + enrichment_model_json2 = enrichment_model.to_dict() + assert enrichment_model_json2 == enrichment_model_json + + +class TestModel_EnrichmentOptions: + """ + Test Class for EnrichmentOptions + """ + + def test_enrichment_options_serialization(self): + """ + Test serialization/deserialization for EnrichmentOptions + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + webhook_header_model = {} # WebhookHeader + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + # Construct a json representation of a EnrichmentOptions model + enrichment_options_model_json = {} + enrichment_options_model_json['languages'] = ['testString'] + enrichment_options_model_json['entity_type'] = 'testString' + enrichment_options_model_json['regular_expression'] = 'testString' + enrichment_options_model_json['result_field'] = 'testString' + enrichment_options_model_json['classifier_id'] = 'testString' + enrichment_options_model_json['model_id'] = 'testString' + enrichment_options_model_json['confidence_threshold'] = 0 + enrichment_options_model_json['top_k'] = 0 + enrichment_options_model_json['url'] = 'testString' + enrichment_options_model_json['version'] = '2023-03-31' + enrichment_options_model_json['secret'] = 'testString' + enrichment_options_model_json['headers'] = webhook_header_model + enrichment_options_model_json['location_encoding'] = '`utf-16`' + + # Construct a model instance of EnrichmentOptions by calling from_dict on the json representation + enrichment_options_model = EnrichmentOptions.from_dict(enrichment_options_model_json) + assert enrichment_options_model != False + + # Construct a model instance of EnrichmentOptions by calling from_dict on the json representation + enrichment_options_model_dict = EnrichmentOptions.from_dict(enrichment_options_model_json).__dict__ + enrichment_options_model2 = EnrichmentOptions(**enrichment_options_model_dict) + + # Verify the model instances are equivalent + assert enrichment_options_model == enrichment_options_model2 + + # Convert model instance back to dict and verify no loss of data + enrichment_options_model_json2 = enrichment_options_model.to_dict() + assert enrichment_options_model_json2 == enrichment_options_model_json + + +class TestModel_Enrichments: + """ + Test Class for Enrichments + """ + + def test_enrichments_serialization(self): + """ + Test serialization/deserialization for Enrichments + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + webhook_header_model = {} # WebhookHeader + webhook_header_model['name'] = 'testString' + webhook_header_model['value'] = 'testString' + + enrichment_options_model = {} # EnrichmentOptions + enrichment_options_model['languages'] = ['testString'] + enrichment_options_model['entity_type'] = 'testString' + enrichment_options_model['regular_expression'] = 'testString' + enrichment_options_model['result_field'] = 'testString' + enrichment_options_model['classifier_id'] = 'testString' + enrichment_options_model['model_id'] = 'testString' + enrichment_options_model['confidence_threshold'] = 0 + enrichment_options_model['top_k'] = 0 + enrichment_options_model['url'] = 'testString' + enrichment_options_model['version'] = '2023-03-31' + enrichment_options_model['secret'] = 'testString' + enrichment_options_model['headers'] = webhook_header_model + enrichment_options_model['location_encoding'] = '`utf-16`' + + enrichment_model = {} # Enrichment + enrichment_model['name'] = 'testString' + enrichment_model['description'] = 'testString' + enrichment_model['type'] = 'part_of_speech' + enrichment_model['options'] = enrichment_options_model + + # Construct a json representation of a Enrichments model + enrichments_model_json = {} + enrichments_model_json['enrichments'] = [enrichment_model] + + # Construct a model instance of Enrichments by calling from_dict on the json representation + enrichments_model = Enrichments.from_dict(enrichments_model_json) + assert enrichments_model != False + + # Construct a model instance of Enrichments by calling from_dict on the json representation + enrichments_model_dict = Enrichments.from_dict(enrichments_model_json).__dict__ + enrichments_model2 = Enrichments(**enrichments_model_dict) + + # Verify the model instances are equivalent + assert enrichments_model == enrichments_model2 + + # Convert model instance back to dict and verify no loss of data + enrichments_model_json2 = enrichments_model.to_dict() + assert enrichments_model_json2 == enrichments_model_json + + +class TestModel_Expansion: + """ + Test Class for Expansion + """ + + def test_expansion_serialization(self): + """ + Test serialization/deserialization for Expansion + """ + + # Construct a json representation of a Expansion model + expansion_model_json = {} + expansion_model_json['input_terms'] = ['testString'] + expansion_model_json['expanded_terms'] = ['testString'] + + # Construct a model instance of Expansion by calling from_dict on the json representation + expansion_model = Expansion.from_dict(expansion_model_json) + assert expansion_model != False + + # Construct a model instance of Expansion by calling from_dict on the json representation + expansion_model_dict = Expansion.from_dict(expansion_model_json).__dict__ + expansion_model2 = Expansion(**expansion_model_dict) + + # Verify the model instances are equivalent + assert expansion_model == expansion_model2 + + # Convert model instance back to dict and verify no loss of data + expansion_model_json2 = expansion_model.to_dict() + assert expansion_model_json2 == expansion_model_json + + +class TestModel_Expansions: + """ + Test Class for Expansions + """ + + def test_expansions_serialization(self): + """ + Test serialization/deserialization for Expansions + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + expansion_model = {} # Expansion + expansion_model['input_terms'] = ['testString'] + expansion_model['expanded_terms'] = ['testString'] + + # Construct a json representation of a Expansions model + expansions_model_json = {} + expansions_model_json['expansions'] = [expansion_model] + + # Construct a model instance of Expansions by calling from_dict on the json representation + expansions_model = Expansions.from_dict(expansions_model_json) + assert expansions_model != False + + # Construct a model instance of Expansions by calling from_dict on the json representation + expansions_model_dict = Expansions.from_dict(expansions_model_json).__dict__ + expansions_model2 = Expansions(**expansions_model_dict) + + # Verify the model instances are equivalent + assert expansions_model == expansions_model2 + + # Convert model instance back to dict and verify no loss of data + expansions_model_json2 = expansions_model.to_dict() + assert expansions_model_json2 == expansions_model_json + + +class TestModel_Field: + """ + Test Class for Field + """ + + def test_field_serialization(self): + """ + Test serialization/deserialization for Field + """ + + # Construct a json representation of a Field model + field_model_json = {} + + # Construct a model instance of Field by calling from_dict on the json representation + field_model = Field.from_dict(field_model_json) + assert field_model != False + + # Construct a model instance of Field by calling from_dict on the json representation + field_model_dict = Field.from_dict(field_model_json).__dict__ + field_model2 = Field(**field_model_dict) + + # Verify the model instances are equivalent + assert field_model == field_model2 + + # Convert model instance back to dict and verify no loss of data + field_model_json2 = field_model.to_dict() + assert field_model_json2 == field_model_json + + +class TestModel_ListBatchesResponse: + """ + Test Class for ListBatchesResponse + """ + + def test_list_batches_response_serialization(self): + """ + Test serialization/deserialization for ListBatchesResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + batch_details_model = {} # BatchDetails + batch_details_model['enrichment_id'] = 'fd290d8b-53e2-dba1-0000-018a8d150b85' + + # Construct a json representation of a ListBatchesResponse model + list_batches_response_model_json = {} + list_batches_response_model_json['batches'] = [batch_details_model] + + # Construct a model instance of ListBatchesResponse by calling from_dict on the json representation + list_batches_response_model = ListBatchesResponse.from_dict(list_batches_response_model_json) + assert list_batches_response_model != False + + # Construct a model instance of ListBatchesResponse by calling from_dict on the json representation + list_batches_response_model_dict = ListBatchesResponse.from_dict(list_batches_response_model_json).__dict__ + list_batches_response_model2 = ListBatchesResponse(**list_batches_response_model_dict) + + # Verify the model instances are equivalent + assert list_batches_response_model == list_batches_response_model2 + + # Convert model instance back to dict and verify no loss of data + list_batches_response_model_json2 = list_batches_response_model.to_dict() + assert list_batches_response_model_json2 == list_batches_response_model_json + + +class TestModel_ListCollectionsResponse: + """ + Test Class for ListCollectionsResponse + """ + + def test_list_collections_response_serialization(self): + """ + Test serialization/deserialization for ListCollectionsResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. + + collection_model = {} # Collection + collection_model['name'] = 'example' + + # Construct a json representation of a ListCollectionsResponse model + list_collections_response_model_json = {} + list_collections_response_model_json['collections'] = [collection_model] + + # Construct a model instance of ListCollectionsResponse by calling from_dict on the json representation + list_collections_response_model = ListCollectionsResponse.from_dict(list_collections_response_model_json) + assert list_collections_response_model != False + + # Construct a model instance of ListCollectionsResponse by calling from_dict on the json representation + list_collections_response_model_dict = ListCollectionsResponse.from_dict(list_collections_response_model_json).__dict__ + list_collections_response_model2 = ListCollectionsResponse(**list_collections_response_model_dict) + + # Verify the model instances are equivalent + assert list_collections_response_model == list_collections_response_model2 + + # Convert model instance back to dict and verify no loss of data + list_collections_response_model_json2 = list_collections_response_model.to_dict() + assert list_collections_response_model_json2 == list_collections_response_model_json + + +class TestModel_ListDocumentsResponse: + """ + Test Class for ListDocumentsResponse + """ + + def test_list_documents_response_serialization(self): + """ + Test serialization/deserialization for ListDocumentsResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + notice_model = {} # Notice + + document_details_children_model = {} # DocumentDetailsChildren + document_details_children_model['have_notices'] = True + document_details_children_model['count'] = 38 + + document_details_model = {} # DocumentDetails + document_details_model['status'] = 'available' + document_details_model['notices'] = [notice_model] + document_details_model['children'] = document_details_children_model + document_details_model['filename'] = 'testString' + document_details_model['file_type'] = 'testString' + document_details_model['sha256'] = 'testString' + + # Construct a json representation of a ListDocumentsResponse model + list_documents_response_model_json = {} + list_documents_response_model_json['matching_results'] = 38 + list_documents_response_model_json['documents'] = [document_details_model] + + # Construct a model instance of ListDocumentsResponse by calling from_dict on the json representation + list_documents_response_model = ListDocumentsResponse.from_dict(list_documents_response_model_json) + assert list_documents_response_model != False + + # Construct a model instance of ListDocumentsResponse by calling from_dict on the json representation + list_documents_response_model_dict = ListDocumentsResponse.from_dict(list_documents_response_model_json).__dict__ + list_documents_response_model2 = ListDocumentsResponse(**list_documents_response_model_dict) + + # Verify the model instances are equivalent + assert list_documents_response_model == list_documents_response_model2 + + # Convert model instance back to dict and verify no loss of data + list_documents_response_model_json2 = list_documents_response_model.to_dict() + assert list_documents_response_model_json2 == list_documents_response_model_json + + +class TestModel_ListFieldsResponse: + """ + Test Class for ListFieldsResponse + """ + + def test_list_fields_response_serialization(self): + """ + Test serialization/deserialization for ListFieldsResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. + + field_model = {} # Field + + # Construct a json representation of a ListFieldsResponse model + list_fields_response_model_json = {} + list_fields_response_model_json['fields'] = [field_model] + + # Construct a model instance of ListFieldsResponse by calling from_dict on the json representation + list_fields_response_model = ListFieldsResponse.from_dict(list_fields_response_model_json) + assert list_fields_response_model != False + + # Construct a model instance of ListFieldsResponse by calling from_dict on the json representation + list_fields_response_model_dict = ListFieldsResponse.from_dict(list_fields_response_model_json).__dict__ + list_fields_response_model2 = ListFieldsResponse(**list_fields_response_model_dict) + + # Verify the model instances are equivalent + assert list_fields_response_model == list_fields_response_model2 + + # Convert model instance back to dict and verify no loss of data + list_fields_response_model_json2 = list_fields_response_model.to_dict() + assert list_fields_response_model_json2 == list_fields_response_model_json + + +class TestModel_ListProjectsResponse: + """ + Test Class for ListProjectsResponse + """ + + def test_list_projects_response_serialization(self): + """ + Test serialization/deserialization for ListProjectsResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + project_list_details_model = {} # ProjectListDetails + project_list_details_model['name'] = 'testString' + project_list_details_model['type'] = 'intelligent_document_processing' + + # Construct a json representation of a ListProjectsResponse model + list_projects_response_model_json = {} + list_projects_response_model_json['projects'] = [project_list_details_model] + + # Construct a model instance of ListProjectsResponse by calling from_dict on the json representation + list_projects_response_model = ListProjectsResponse.from_dict(list_projects_response_model_json) + assert list_projects_response_model != False + + # Construct a model instance of ListProjectsResponse by calling from_dict on the json representation + list_projects_response_model_dict = ListProjectsResponse.from_dict(list_projects_response_model_json).__dict__ + list_projects_response_model2 = ListProjectsResponse(**list_projects_response_model_dict) + + # Verify the model instances are equivalent + assert list_projects_response_model == list_projects_response_model2 + + # Convert model instance back to dict and verify no loss of data + list_projects_response_model_json2 = list_projects_response_model.to_dict() + assert list_projects_response_model_json2 == list_projects_response_model_json + + +class TestModel_ModelEvaluationMacroAverage: + """ + Test Class for ModelEvaluationMacroAverage + """ + + def test_model_evaluation_macro_average_serialization(self): + """ + Test serialization/deserialization for ModelEvaluationMacroAverage + """ + + # Construct a json representation of a ModelEvaluationMacroAverage model + model_evaluation_macro_average_model_json = {} + model_evaluation_macro_average_model_json['precision'] = 0 + model_evaluation_macro_average_model_json['recall'] = 0 + model_evaluation_macro_average_model_json['f1'] = 0 + + # Construct a model instance of ModelEvaluationMacroAverage by calling from_dict on the json representation + model_evaluation_macro_average_model = ModelEvaluationMacroAverage.from_dict(model_evaluation_macro_average_model_json) + assert model_evaluation_macro_average_model != False + + # Construct a model instance of ModelEvaluationMacroAverage by calling from_dict on the json representation + model_evaluation_macro_average_model_dict = ModelEvaluationMacroAverage.from_dict(model_evaluation_macro_average_model_json).__dict__ + model_evaluation_macro_average_model2 = ModelEvaluationMacroAverage(**model_evaluation_macro_average_model_dict) + + # Verify the model instances are equivalent + assert model_evaluation_macro_average_model == model_evaluation_macro_average_model2 + + # Convert model instance back to dict and verify no loss of data + model_evaluation_macro_average_model_json2 = model_evaluation_macro_average_model.to_dict() + assert model_evaluation_macro_average_model_json2 == model_evaluation_macro_average_model_json + + +class TestModel_ModelEvaluationMicroAverage: + """ + Test Class for ModelEvaluationMicroAverage + """ + + def test_model_evaluation_micro_average_serialization(self): + """ + Test serialization/deserialization for ModelEvaluationMicroAverage + """ + + # Construct a json representation of a ModelEvaluationMicroAverage model + model_evaluation_micro_average_model_json = {} + model_evaluation_micro_average_model_json['precision'] = 0 + model_evaluation_micro_average_model_json['recall'] = 0 + model_evaluation_micro_average_model_json['f1'] = 0 + + # Construct a model instance of ModelEvaluationMicroAverage by calling from_dict on the json representation + 
model_evaluation_micro_average_model = ModelEvaluationMicroAverage.from_dict(model_evaluation_micro_average_model_json) + assert model_evaluation_micro_average_model != False + + # Construct a model instance of ModelEvaluationMicroAverage by calling from_dict on the json representation + model_evaluation_micro_average_model_dict = ModelEvaluationMicroAverage.from_dict(model_evaluation_micro_average_model_json).__dict__ + model_evaluation_micro_average_model2 = ModelEvaluationMicroAverage(**model_evaluation_micro_average_model_dict) + + # Verify the model instances are equivalent + assert model_evaluation_micro_average_model == model_evaluation_micro_average_model2 + + # Convert model instance back to dict and verify no loss of data + model_evaluation_micro_average_model_json2 = model_evaluation_micro_average_model.to_dict() + assert model_evaluation_micro_average_model_json2 == model_evaluation_micro_average_model_json + + +class TestModel_Notice: + """ + Test Class for Notice + """ + + def test_notice_serialization(self): + """ + Test serialization/deserialization for Notice + """ + + # Construct a json representation of a Notice model + notice_model_json = {} + + # Construct a model instance of Notice by calling from_dict on the json representation + notice_model = Notice.from_dict(notice_model_json) + assert notice_model != False + + # Construct a model instance of Notice by calling from_dict on the json representation + notice_model_dict = Notice.from_dict(notice_model_json).__dict__ + notice_model2 = Notice(**notice_model_dict) + + # Verify the model instances are equivalent + assert notice_model == notice_model2 + + # Convert model instance back to dict and verify no loss of data + notice_model_json2 = notice_model.to_dict() + assert notice_model_json2 == notice_model_json + + +class TestModel_PerClassModelEvaluation: + """ + Test Class for PerClassModelEvaluation + """ + + def test_per_class_model_evaluation_serialization(self): + """ + Test serialization/deserialization for PerClassModelEvaluation + """ + + # Construct a json representation of a PerClassModelEvaluation model + per_class_model_evaluation_model_json = {} + per_class_model_evaluation_model_json['name'] = 'testString' + per_class_model_evaluation_model_json['precision'] = 0 + per_class_model_evaluation_model_json['recall'] = 0 + per_class_model_evaluation_model_json['f1'] = 0 + + # Construct a model instance of PerClassModelEvaluation by calling from_dict on the json representation + per_class_model_evaluation_model = PerClassModelEvaluation.from_dict(per_class_model_evaluation_model_json) + assert per_class_model_evaluation_model != False + + # Construct a model instance of PerClassModelEvaluation by calling from_dict on the json representation + per_class_model_evaluation_model_dict = PerClassModelEvaluation.from_dict(per_class_model_evaluation_model_json).__dict__ + per_class_model_evaluation_model2 = PerClassModelEvaluation(**per_class_model_evaluation_model_dict) + + # Verify the model instances are equivalent + assert per_class_model_evaluation_model == per_class_model_evaluation_model2 + + # Convert model instance back to dict and verify no loss of data + per_class_model_evaluation_model_json2 = per_class_model_evaluation_model.to_dict() + assert per_class_model_evaluation_model_json2 == per_class_model_evaluation_model_json + + +class TestModel_ProjectDetails: + """ + Test Class for ProjectDetails + """ + + def test_project_details_serialization(self): + """ + Test serialization/deserialization for 
ProjectDetails + """ + + # Construct dict forms of any model objects needed in order to build this model. + + default_query_params_passages_model = {} # DefaultQueryParamsPassages + default_query_params_passages_model['enabled'] = True + default_query_params_passages_model['count'] = 38 + default_query_params_passages_model['fields'] = ['testString'] + default_query_params_passages_model['characters'] = 38 + default_query_params_passages_model['per_document'] = True + default_query_params_passages_model['max_per_document'] = 38 + + default_query_params_table_results_model = {} # DefaultQueryParamsTableResults + default_query_params_table_results_model['enabled'] = True + default_query_params_table_results_model['count'] = 38 + default_query_params_table_results_model['per_document'] = 0 + + default_query_params_suggested_refinements_model = {} # DefaultQueryParamsSuggestedRefinements + default_query_params_suggested_refinements_model['enabled'] = True + default_query_params_suggested_refinements_model['count'] = 38 + + default_query_params_model = {} # DefaultQueryParams + default_query_params_model['collection_ids'] = ['testString'] + default_query_params_model['passages'] = default_query_params_passages_model + default_query_params_model['table_results'] = default_query_params_table_results_model + default_query_params_model['aggregation'] = 'testString' + default_query_params_model['suggested_refinements'] = default_query_params_suggested_refinements_model + default_query_params_model['spelling_suggestions'] = True + default_query_params_model['highlight'] = True + default_query_params_model['count'] = 38 + default_query_params_model['sort'] = 'testString' + default_query_params_model['return'] = ['testString'] + + # Construct a json representation of a ProjectDetails model + project_details_model_json = {} + project_details_model_json['name'] = 'testString' + project_details_model_json['type'] = 'intelligent_document_processing' + project_details_model_json['default_query_parameters'] = default_query_params_model + + # Construct a model instance of ProjectDetails by calling from_dict on the json representation + project_details_model = ProjectDetails.from_dict(project_details_model_json) + assert project_details_model != False + + # Construct a model instance of ProjectDetails by calling from_dict on the json representation + project_details_model_dict = ProjectDetails.from_dict(project_details_model_json).__dict__ + project_details_model2 = ProjectDetails(**project_details_model_dict) + + # Verify the model instances are equivalent + assert project_details_model == project_details_model2 + + # Convert model instance back to dict and verify no loss of data + project_details_model_json2 = project_details_model.to_dict() + assert project_details_model_json2 == project_details_model_json + + +class TestModel_ProjectListDetails: + """ + Test Class for ProjectListDetails + """ + + def test_project_list_details_serialization(self): + """ + Test serialization/deserialization for ProjectListDetails + """ + + # Construct a json representation of a ProjectListDetails model + project_list_details_model_json = {} + project_list_details_model_json['name'] = 'testString' + project_list_details_model_json['type'] = 'intelligent_document_processing' + + # Construct a model instance of ProjectListDetails by calling from_dict on the json representation + project_list_details_model = ProjectListDetails.from_dict(project_list_details_model_json) + assert project_list_details_model != False + + # Construct 
a model instance of ProjectListDetails by calling from_dict on the json representation + project_list_details_model_dict = ProjectListDetails.from_dict(project_list_details_model_json).__dict__ + project_list_details_model2 = ProjectListDetails(**project_list_details_model_dict) + + # Verify the model instances are equivalent + assert project_list_details_model == project_list_details_model2 + + # Convert model instance back to dict and verify no loss of data + project_list_details_model_json2 = project_list_details_model.to_dict() + assert project_list_details_model_json2 == project_list_details_model_json + + +class TestModel_ProjectListDetailsRelevancyTrainingStatus: + """ + Test Class for ProjectListDetailsRelevancyTrainingStatus + """ + + def test_project_list_details_relevancy_training_status_serialization(self): + """ + Test serialization/deserialization for ProjectListDetailsRelevancyTrainingStatus + """ + + # Construct a json representation of a ProjectListDetailsRelevancyTrainingStatus model + project_list_details_relevancy_training_status_model_json = {} + project_list_details_relevancy_training_status_model_json['data_updated'] = 'testString' + project_list_details_relevancy_training_status_model_json['total_examples'] = 38 + project_list_details_relevancy_training_status_model_json['sufficient_label_diversity'] = True + project_list_details_relevancy_training_status_model_json['processing'] = True + project_list_details_relevancy_training_status_model_json['minimum_examples_added'] = True + project_list_details_relevancy_training_status_model_json['successfully_trained'] = 'testString' + project_list_details_relevancy_training_status_model_json['available'] = True + project_list_details_relevancy_training_status_model_json['notices'] = 38 + project_list_details_relevancy_training_status_model_json['minimum_queries_added'] = True + + # Construct a model instance of ProjectListDetailsRelevancyTrainingStatus by calling from_dict on the json representation + project_list_details_relevancy_training_status_model = ProjectListDetailsRelevancyTrainingStatus.from_dict(project_list_details_relevancy_training_status_model_json) + assert project_list_details_relevancy_training_status_model != False + + # Construct a model instance of ProjectListDetailsRelevancyTrainingStatus by calling from_dict on the json representation + project_list_details_relevancy_training_status_model_dict = ProjectListDetailsRelevancyTrainingStatus.from_dict(project_list_details_relevancy_training_status_model_json).__dict__ + project_list_details_relevancy_training_status_model2 = ProjectListDetailsRelevancyTrainingStatus(**project_list_details_relevancy_training_status_model_dict) + + # Verify the model instances are equivalent + assert project_list_details_relevancy_training_status_model == project_list_details_relevancy_training_status_model2 + + # Convert model instance back to dict and verify no loss of data + project_list_details_relevancy_training_status_model_json2 = project_list_details_relevancy_training_status_model.to_dict() + assert project_list_details_relevancy_training_status_model_json2 == project_list_details_relevancy_training_status_model_json + + +class TestModel_QueryGroupByAggregationResult: + """ + Test Class for QueryGroupByAggregationResult + """ + + def test_query_group_by_aggregation_result_serialization(self): + """ + Test serialization/deserialization for QueryGroupByAggregationResult + """ + + # Construct a json representation of a QueryGroupByAggregationResult model + 
query_group_by_aggregation_result_model_json = {} + query_group_by_aggregation_result_model_json['key'] = 'testString' + query_group_by_aggregation_result_model_json['matching_results'] = 38 + query_group_by_aggregation_result_model_json['relevancy'] = 72.5 + query_group_by_aggregation_result_model_json['total_matching_documents'] = 38 + query_group_by_aggregation_result_model_json['estimated_matching_results'] = 72.5 + query_group_by_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a model instance of QueryGroupByAggregationResult by calling from_dict on the json representation + query_group_by_aggregation_result_model = QueryGroupByAggregationResult.from_dict(query_group_by_aggregation_result_model_json) + assert query_group_by_aggregation_result_model != False + + # Construct a model instance of QueryGroupByAggregationResult by calling from_dict on the json representation + query_group_by_aggregation_result_model_dict = QueryGroupByAggregationResult.from_dict(query_group_by_aggregation_result_model_json).__dict__ + query_group_by_aggregation_result_model2 = QueryGroupByAggregationResult(**query_group_by_aggregation_result_model_dict) + + # Verify the model instances are equivalent + assert query_group_by_aggregation_result_model == query_group_by_aggregation_result_model2 + + # Convert model instance back to dict and verify no loss of data + query_group_by_aggregation_result_model_json2 = query_group_by_aggregation_result_model.to_dict() + assert query_group_by_aggregation_result_model_json2 == query_group_by_aggregation_result_model_json + + +class TestModel_QueryHistogramAggregationResult: + """ + Test Class for QueryHistogramAggregationResult + """ + + def test_query_histogram_aggregation_result_serialization(self): + """ + Test serialization/deserialization for QueryHistogramAggregationResult + """ + + # Construct a json representation of a QueryHistogramAggregationResult model + query_histogram_aggregation_result_model_json = {} + query_histogram_aggregation_result_model_json['key'] = 26 + query_histogram_aggregation_result_model_json['matching_results'] = 38 + query_histogram_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a model instance of QueryHistogramAggregationResult by calling from_dict on the json representation + query_histogram_aggregation_result_model = QueryHistogramAggregationResult.from_dict(query_histogram_aggregation_result_model_json) + assert query_histogram_aggregation_result_model != False + + # Construct a model instance of QueryHistogramAggregationResult by calling from_dict on the json representation + query_histogram_aggregation_result_model_dict = QueryHistogramAggregationResult.from_dict(query_histogram_aggregation_result_model_json).__dict__ + query_histogram_aggregation_result_model2 = QueryHistogramAggregationResult(**query_histogram_aggregation_result_model_dict) + + # Verify the model instances are equivalent + assert query_histogram_aggregation_result_model == query_histogram_aggregation_result_model2 + + # Convert model instance back to dict and verify no loss of data + query_histogram_aggregation_result_model_json2 = query_histogram_aggregation_result_model.to_dict() + assert query_histogram_aggregation_result_model_json2 == query_histogram_aggregation_result_model_json + + +class TestModel_QueryLargePassages: + """ + Test Class for QueryLargePassages + """ + + def test_query_large_passages_serialization(self): + """ + Test serialization/deserialization for 
QueryLargePassages + """ + + # Construct a json representation of a QueryLargePassages model + query_large_passages_model_json = {} + query_large_passages_model_json['enabled'] = True + query_large_passages_model_json['per_document'] = True + query_large_passages_model_json['max_per_document'] = 38 + query_large_passages_model_json['fields'] = ['testString'] + query_large_passages_model_json['count'] = 400 + query_large_passages_model_json['characters'] = 50 + query_large_passages_model_json['find_answers'] = False + query_large_passages_model_json['max_answers_per_passage'] = 1 + + # Construct a model instance of QueryLargePassages by calling from_dict on the json representation + query_large_passages_model = QueryLargePassages.from_dict(query_large_passages_model_json) + assert query_large_passages_model != False + + # Construct a model instance of QueryLargePassages by calling from_dict on the json representation + query_large_passages_model_dict = QueryLargePassages.from_dict(query_large_passages_model_json).__dict__ + query_large_passages_model2 = QueryLargePassages(**query_large_passages_model_dict) + + # Verify the model instances are equivalent + assert query_large_passages_model == query_large_passages_model2 + + # Convert model instance back to dict and verify no loss of data + query_large_passages_model_json2 = query_large_passages_model.to_dict() + assert query_large_passages_model_json2 == query_large_passages_model_json + + +class TestModel_QueryLargeSimilar: + """ + Test Class for QueryLargeSimilar + """ + + def test_query_large_similar_serialization(self): + """ + Test serialization/deserialization for QueryLargeSimilar + """ + + # Construct a json representation of a QueryLargeSimilar model + query_large_similar_model_json = {} + query_large_similar_model_json['enabled'] = False + query_large_similar_model_json['document_ids'] = ['testString'] + query_large_similar_model_json['fields'] = ['testString'] + + # Construct a model instance of QueryLargeSimilar by calling from_dict on the json representation + query_large_similar_model = QueryLargeSimilar.from_dict(query_large_similar_model_json) + assert query_large_similar_model != False + + # Construct a model instance of QueryLargeSimilar by calling from_dict on the json representation + query_large_similar_model_dict = QueryLargeSimilar.from_dict(query_large_similar_model_json).__dict__ + query_large_similar_model2 = QueryLargeSimilar(**query_large_similar_model_dict) + + # Verify the model instances are equivalent + assert query_large_similar_model == query_large_similar_model2 + + # Convert model instance back to dict and verify no loss of data + query_large_similar_model_json2 = query_large_similar_model.to_dict() + assert query_large_similar_model_json2 == query_large_similar_model_json + + +class TestModel_QueryLargeSuggestedRefinements: + """ + Test Class for QueryLargeSuggestedRefinements + """ + + def test_query_large_suggested_refinements_serialization(self): + """ + Test serialization/deserialization for QueryLargeSuggestedRefinements + """ + + # Construct a json representation of a QueryLargeSuggestedRefinements model + query_large_suggested_refinements_model_json = {} + query_large_suggested_refinements_model_json['enabled'] = True + query_large_suggested_refinements_model_json['count'] = 1 + + # Construct a model instance of QueryLargeSuggestedRefinements by calling from_dict on the json representation + query_large_suggested_refinements_model = 
QueryLargeSuggestedRefinements.from_dict(query_large_suggested_refinements_model_json) + assert query_large_suggested_refinements_model != False + + # Construct a model instance of QueryLargeSuggestedRefinements by calling from_dict on the json representation + query_large_suggested_refinements_model_dict = QueryLargeSuggestedRefinements.from_dict(query_large_suggested_refinements_model_json).__dict__ + query_large_suggested_refinements_model2 = QueryLargeSuggestedRefinements(**query_large_suggested_refinements_model_dict) + + # Verify the model instances are equivalent + assert query_large_suggested_refinements_model == query_large_suggested_refinements_model2 + + # Convert model instance back to dict and verify no loss of data + query_large_suggested_refinements_model_json2 = query_large_suggested_refinements_model.to_dict() + assert query_large_suggested_refinements_model_json2 == query_large_suggested_refinements_model_json + + +class TestModel_QueryLargeTableResults: + """ + Test Class for QueryLargeTableResults + """ + + def test_query_large_table_results_serialization(self): + """ + Test serialization/deserialization for QueryLargeTableResults + """ + + # Construct a json representation of a QueryLargeTableResults model + query_large_table_results_model_json = {} + query_large_table_results_model_json['enabled'] = True + query_large_table_results_model_json['count'] = 38 + + # Construct a model instance of QueryLargeTableResults by calling from_dict on the json representation + query_large_table_results_model = QueryLargeTableResults.from_dict(query_large_table_results_model_json) + assert query_large_table_results_model != False + + # Construct a model instance of QueryLargeTableResults by calling from_dict on the json representation + query_large_table_results_model_dict = QueryLargeTableResults.from_dict(query_large_table_results_model_json).__dict__ + query_large_table_results_model2 = QueryLargeTableResults(**query_large_table_results_model_dict) + + # Verify the model instances are equivalent + assert query_large_table_results_model == query_large_table_results_model2 + + # Convert model instance back to dict and verify no loss of data + query_large_table_results_model_json2 = query_large_table_results_model.to_dict() + assert query_large_table_results_model_json2 == query_large_table_results_model_json + + +class TestModel_QueryNoticesResponse: + """ + Test Class for QueryNoticesResponse + """ + + def test_query_notices_response_serialization(self): + """ + Test serialization/deserialization for QueryNoticesResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + notice_model = {} # Notice + + # Construct a json representation of a QueryNoticesResponse model + query_notices_response_model_json = {} + query_notices_response_model_json['matching_results'] = 38 + query_notices_response_model_json['notices'] = [notice_model] + + # Construct a model instance of QueryNoticesResponse by calling from_dict on the json representation + query_notices_response_model = QueryNoticesResponse.from_dict(query_notices_response_model_json) + assert query_notices_response_model != False + + # Construct a model instance of QueryNoticesResponse by calling from_dict on the json representation + query_notices_response_model_dict = QueryNoticesResponse.from_dict(query_notices_response_model_json).__dict__ + query_notices_response_model2 = QueryNoticesResponse(**query_notices_response_model_dict) + + # Verify the model instances are equivalent + assert query_notices_response_model == query_notices_response_model2 + + # Convert model instance back to dict and verify no loss of data + query_notices_response_model_json2 = query_notices_response_model.to_dict() + assert query_notices_response_model_json2 == query_notices_response_model_json + + +class TestModel_QueryPairAggregationResult: + """ + Test Class for QueryPairAggregationResult + """ + + def test_query_pair_aggregation_result_serialization(self): + """ + Test serialization/deserialization for QueryPairAggregationResult + """ + + # Construct a json representation of a QueryPairAggregationResult model + query_pair_aggregation_result_model_json = {} + query_pair_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a model instance of QueryPairAggregationResult by calling from_dict on the json representation + query_pair_aggregation_result_model = QueryPairAggregationResult.from_dict(query_pair_aggregation_result_model_json) + assert query_pair_aggregation_result_model != False + + # Construct a model instance of QueryPairAggregationResult by calling from_dict on the json representation + query_pair_aggregation_result_model_dict = QueryPairAggregationResult.from_dict(query_pair_aggregation_result_model_json).__dict__ + query_pair_aggregation_result_model2 = QueryPairAggregationResult(**query_pair_aggregation_result_model_dict) + + # Verify the model instances are equivalent + assert query_pair_aggregation_result_model == query_pair_aggregation_result_model2 + + # Convert model instance back to dict and verify no loss of data + query_pair_aggregation_result_model_json2 = query_pair_aggregation_result_model.to_dict() + assert query_pair_aggregation_result_model_json2 == query_pair_aggregation_result_model_json + + +class TestModel_QueryResponse: + """ + Test Class for QueryResponse + """ + + def test_query_response_serialization(self): + """ + Test serialization/deserialization for QueryResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + query_result_metadata_model = {} # QueryResultMetadata + query_result_metadata_model['document_retrieval_source'] = 'search' + query_result_metadata_model['collection_id'] = 'testString' + query_result_metadata_model['confidence'] = 0 + + result_passage_answer_model = {} # ResultPassageAnswer + result_passage_answer_model['answer_text'] = 'testString' + result_passage_answer_model['start_offset'] = 38 + result_passage_answer_model['end_offset'] = 38 + result_passage_answer_model['confidence'] = 0 + + query_result_passage_model = {} # QueryResultPassage + query_result_passage_model['passage_text'] = 'testString' + query_result_passage_model['start_offset'] = 38 + query_result_passage_model['end_offset'] = 38 + query_result_passage_model['field'] = 'testString' + query_result_passage_model['answers'] = [result_passage_answer_model] + + query_result_model = {} # QueryResult + query_result_model['document_id'] = 'testString' + query_result_model['metadata'] = {'anyKey': 'anyValue'} + query_result_model['result_metadata'] = query_result_metadata_model + query_result_model['document_passages'] = [query_result_passage_model] + query_result_model['id'] = 'watson-generated ID' + + query_term_aggregation_result_model = {} # QueryTermAggregationResult + query_term_aggregation_result_model['key'] = 'active' + query_term_aggregation_result_model['matching_results'] = 34 + query_term_aggregation_result_model['relevancy'] = 72.5 + query_term_aggregation_result_model['total_matching_documents'] = 38 + query_term_aggregation_result_model['estimated_matching_results'] = 72.5 + query_term_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}] + + query_aggregation_model = {} # QueryAggregationQueryTermAggregation + query_aggregation_model['type'] = 'term' + query_aggregation_model['field'] = 'field' + query_aggregation_model['count'] = 1 + query_aggregation_model['name'] = 'testString' + query_aggregation_model['results'] = [query_term_aggregation_result_model] + + retrieval_details_model = {} # RetrievalDetails + retrieval_details_model['document_retrieval_strategy'] = 'untrained' + + query_suggested_refinement_model = {} # QuerySuggestedRefinement + query_suggested_refinement_model['text'] = 'testString' + + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + table_text_location_model = {} # TableTextLocation + table_text_location_model['text'] = 'testString' + table_text_location_model['location'] = table_element_location_model + + table_headers_model = {} # TableHeaders + table_headers_model['cell_id'] = 'testString' + table_headers_model['location'] = table_element_location_model + table_headers_model['text'] = 'testString' + table_headers_model['row_index_begin'] = 26 + table_headers_model['row_index_end'] = 26 + table_headers_model['column_index_begin'] = 26 + table_headers_model['column_index_end'] = 26 + + table_row_headers_model = {} # TableRowHeaders + table_row_headers_model['cell_id'] = 'testString' + table_row_headers_model['location'] = table_element_location_model + table_row_headers_model['text'] = 'testString' + table_row_headers_model['text_normalized'] = 'testString' + table_row_headers_model['row_index_begin'] = 26 + table_row_headers_model['row_index_end'] = 26 + table_row_headers_model['column_index_begin'] = 26 + table_row_headers_model['column_index_end'] = 26 + + table_column_headers_model = {} # TableColumnHeaders + table_column_headers_model['cell_id'] = 'testString' + 
table_column_headers_model['location'] = table_element_location_model + table_column_headers_model['text'] = 'testString' + table_column_headers_model['text_normalized'] = 'testString' + table_column_headers_model['row_index_begin'] = 26 + table_column_headers_model['row_index_end'] = 26 + table_column_headers_model['column_index_begin'] = 26 + table_column_headers_model['column_index_end'] = 26 + + table_cell_key_model = {} # TableCellKey + table_cell_key_model['cell_id'] = 'testString' + table_cell_key_model['location'] = table_element_location_model + table_cell_key_model['text'] = 'testString' + + table_cell_values_model = {} # TableCellValues + table_cell_values_model['cell_id'] = 'testString' + table_cell_values_model['location'] = table_element_location_model + table_cell_values_model['text'] = 'testString' + + table_key_value_pairs_model = {} # TableKeyValuePairs + table_key_value_pairs_model['key'] = table_cell_key_model + table_key_value_pairs_model['value'] = [table_cell_values_model] + + document_attribute_model = {} # DocumentAttribute + document_attribute_model['type'] = 'testString' + document_attribute_model['text'] = 'testString' + document_attribute_model['location'] = table_element_location_model + + table_body_cells_model = {} # TableBodyCells + table_body_cells_model['cell_id'] = 'testString' + table_body_cells_model['location'] = table_element_location_model + table_body_cells_model['text'] = 'testString' + table_body_cells_model['row_index_begin'] = 26 + table_body_cells_model['row_index_end'] = 26 + table_body_cells_model['column_index_begin'] = 26 + table_body_cells_model['column_index_end'] = 26 + table_body_cells_model['row_header_ids'] = ['testString'] + table_body_cells_model['row_header_texts'] = ['testString'] + table_body_cells_model['row_header_texts_normalized'] = ['testString'] + table_body_cells_model['column_header_ids'] = ['testString'] + table_body_cells_model['column_header_texts'] = ['testString'] + table_body_cells_model['column_header_texts_normalized'] = ['testString'] + table_body_cells_model['attributes'] = [document_attribute_model] + + table_result_table_model = {} # TableResultTable + table_result_table_model['location'] = table_element_location_model + table_result_table_model['text'] = 'testString' + table_result_table_model['section_title'] = table_text_location_model + table_result_table_model['title'] = table_text_location_model + table_result_table_model['table_headers'] = [table_headers_model] + table_result_table_model['row_headers'] = [table_row_headers_model] + table_result_table_model['column_headers'] = [table_column_headers_model] + table_result_table_model['key_value_pairs'] = [table_key_value_pairs_model] + table_result_table_model['body_cells'] = [table_body_cells_model] + table_result_table_model['contexts'] = [table_text_location_model] + + query_table_result_model = {} # QueryTableResult + query_table_result_model['table_id'] = 'testString' + query_table_result_model['source_document_id'] = 'testString' + query_table_result_model['collection_id'] = 'testString' + query_table_result_model['table_html'] = 'testString' + query_table_result_model['table_html_offset'] = 38 + query_table_result_model['table'] = table_result_table_model + + query_response_passage_model = {} # QueryResponsePassage + query_response_passage_model['passage_text'] = 'testString' + query_response_passage_model['passage_score'] = 72.5 + query_response_passage_model['document_id'] = 'testString' + query_response_passage_model['collection_id'] = 
'testString' + query_response_passage_model['start_offset'] = 38 + query_response_passage_model['end_offset'] = 38 + query_response_passage_model['field'] = 'testString' + query_response_passage_model['answers'] = [result_passage_answer_model] + + # Construct a json representation of a QueryResponse model + query_response_model_json = {} + query_response_model_json['matching_results'] = 38 + query_response_model_json['results'] = [query_result_model] + query_response_model_json['aggregations'] = [query_aggregation_model] + query_response_model_json['retrieval_details'] = retrieval_details_model + query_response_model_json['suggested_query'] = 'testString' + query_response_model_json['suggested_refinements'] = [query_suggested_refinement_model] + query_response_model_json['table_results'] = [query_table_result_model] + query_response_model_json['passages'] = [query_response_passage_model] + + # Construct a model instance of QueryResponse by calling from_dict on the json representation + query_response_model = QueryResponse.from_dict(query_response_model_json) + assert query_response_model != False + + # Construct a model instance of QueryResponse by calling from_dict on the json representation + query_response_model_dict = QueryResponse.from_dict(query_response_model_json).__dict__ + query_response_model2 = QueryResponse(**query_response_model_dict) + + # Verify the model instances are equivalent + assert query_response_model == query_response_model2 + + # Convert model instance back to dict and verify no loss of data + query_response_model_json2 = query_response_model.to_dict() + assert query_response_model_json2 == query_response_model_json + + +class TestModel_QueryResponsePassage: + """ + Test Class for QueryResponsePassage + """ + + def test_query_response_passage_serialization(self): + """ + Test serialization/deserialization for QueryResponsePassage + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + result_passage_answer_model = {} # ResultPassageAnswer + result_passage_answer_model['answer_text'] = 'testString' + result_passage_answer_model['start_offset'] = 38 + result_passage_answer_model['end_offset'] = 38 + result_passage_answer_model['confidence'] = 0 + + # Construct a json representation of a QueryResponsePassage model + query_response_passage_model_json = {} + query_response_passage_model_json['passage_text'] = 'testString' + query_response_passage_model_json['passage_score'] = 72.5 + query_response_passage_model_json['document_id'] = 'testString' + query_response_passage_model_json['collection_id'] = 'testString' + query_response_passage_model_json['start_offset'] = 38 + query_response_passage_model_json['end_offset'] = 38 + query_response_passage_model_json['field'] = 'testString' + query_response_passage_model_json['answers'] = [result_passage_answer_model] + + # Construct a model instance of QueryResponsePassage by calling from_dict on the json representation + query_response_passage_model = QueryResponsePassage.from_dict(query_response_passage_model_json) + assert query_response_passage_model != False + + # Construct a model instance of QueryResponsePassage by calling from_dict on the json representation + query_response_passage_model_dict = QueryResponsePassage.from_dict(query_response_passage_model_json).__dict__ + query_response_passage_model2 = QueryResponsePassage(**query_response_passage_model_dict) + + # Verify the model instances are equivalent + assert query_response_passage_model == query_response_passage_model2 + + # Convert model instance back to dict and verify no loss of data + query_response_passage_model_json2 = query_response_passage_model.to_dict() + assert query_response_passage_model_json2 == query_response_passage_model_json + + +class TestModel_QueryResult: + """ + Test Class for QueryResult + """ + + def test_query_result_serialization(self): + """ + Test serialization/deserialization for QueryResult + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + query_result_metadata_model = {} # QueryResultMetadata + query_result_metadata_model['document_retrieval_source'] = 'search' + query_result_metadata_model['collection_id'] = 'testString' + query_result_metadata_model['confidence'] = 0 + + result_passage_answer_model = {} # ResultPassageAnswer + result_passage_answer_model['answer_text'] = 'testString' + result_passage_answer_model['start_offset'] = 38 + result_passage_answer_model['end_offset'] = 38 + result_passage_answer_model['confidence'] = 0 + + query_result_passage_model = {} # QueryResultPassage + query_result_passage_model['passage_text'] = 'testString' + query_result_passage_model['start_offset'] = 38 + query_result_passage_model['end_offset'] = 38 + query_result_passage_model['field'] = 'testString' + query_result_passage_model['answers'] = [result_passage_answer_model] + + # Construct a json representation of a QueryResult model + query_result_model_json = {} + query_result_model_json['document_id'] = 'testString' + query_result_model_json['metadata'] = {'anyKey': 'anyValue'} + query_result_model_json['result_metadata'] = query_result_metadata_model + query_result_model_json['document_passages'] = [query_result_passage_model] + query_result_model_json['foo'] = 'testString' + + # Construct a model instance of QueryResult by calling from_dict on the json representation + query_result_model = QueryResult.from_dict(query_result_model_json) + assert query_result_model != False + + # Construct a model instance of QueryResult by calling from_dict on the json representation + query_result_model_dict = QueryResult.from_dict(query_result_model_json).__dict__ + query_result_model2 = QueryResult(**query_result_model_dict) + + # Verify the model instances are equivalent + assert query_result_model == query_result_model2 + + # Convert model instance back to dict and verify no loss of data + query_result_model_json2 = query_result_model.to_dict() + assert query_result_model_json2 == query_result_model_json + + # Test get_properties and set_properties methods. 
+ query_result_model.set_properties({}) + actual_dict = query_result_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + query_result_model.set_properties(expected_dict) + actual_dict = query_result_model.get_properties() + assert actual_dict.keys() == expected_dict.keys() + + +class TestModel_QueryResultMetadata: + """ + Test Class for QueryResultMetadata + """ + + def test_query_result_metadata_serialization(self): + """ + Test serialization/deserialization for QueryResultMetadata + """ + + # Construct a json representation of a QueryResultMetadata model + query_result_metadata_model_json = {} + query_result_metadata_model_json['document_retrieval_source'] = 'search' + query_result_metadata_model_json['collection_id'] = 'testString' + query_result_metadata_model_json['confidence'] = 0 + + # Construct a model instance of QueryResultMetadata by calling from_dict on the json representation + query_result_metadata_model = QueryResultMetadata.from_dict(query_result_metadata_model_json) + assert query_result_metadata_model != False + + # Construct a model instance of QueryResultMetadata by calling from_dict on the json representation + query_result_metadata_model_dict = QueryResultMetadata.from_dict(query_result_metadata_model_json).__dict__ + query_result_metadata_model2 = QueryResultMetadata(**query_result_metadata_model_dict) + + # Verify the model instances are equivalent + assert query_result_metadata_model == query_result_metadata_model2 + + # Convert model instance back to dict and verify no loss of data + query_result_metadata_model_json2 = query_result_metadata_model.to_dict() + assert query_result_metadata_model_json2 == query_result_metadata_model_json + + +class TestModel_QueryResultPassage: + """ + Test Class for QueryResultPassage + """ + + def test_query_result_passage_serialization(self): + """ + Test serialization/deserialization for QueryResultPassage + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + result_passage_answer_model = {} # ResultPassageAnswer + result_passage_answer_model['answer_text'] = 'testString' + result_passage_answer_model['start_offset'] = 38 + result_passage_answer_model['end_offset'] = 38 + result_passage_answer_model['confidence'] = 0 + + # Construct a json representation of a QueryResultPassage model + query_result_passage_model_json = {} + query_result_passage_model_json['passage_text'] = 'testString' + query_result_passage_model_json['start_offset'] = 38 + query_result_passage_model_json['end_offset'] = 38 + query_result_passage_model_json['field'] = 'testString' + query_result_passage_model_json['answers'] = [result_passage_answer_model] + + # Construct a model instance of QueryResultPassage by calling from_dict on the json representation + query_result_passage_model = QueryResultPassage.from_dict(query_result_passage_model_json) + assert query_result_passage_model != False + + # Construct a model instance of QueryResultPassage by calling from_dict on the json representation + query_result_passage_model_dict = QueryResultPassage.from_dict(query_result_passage_model_json).__dict__ + query_result_passage_model2 = QueryResultPassage(**query_result_passage_model_dict) + + # Verify the model instances are equivalent + assert query_result_passage_model == query_result_passage_model2 + + # Convert model instance back to dict and verify no loss of data + query_result_passage_model_json2 = query_result_passage_model.to_dict() + assert query_result_passage_model_json2 == query_result_passage_model_json + + +class TestModel_QuerySuggestedRefinement: + """ + Test Class for QuerySuggestedRefinement + """ + + def test_query_suggested_refinement_serialization(self): + """ + Test serialization/deserialization for QuerySuggestedRefinement + """ + + # Construct a json representation of a QuerySuggestedRefinement model + query_suggested_refinement_model_json = {} + query_suggested_refinement_model_json['text'] = 'testString' + + # Construct a model instance of QuerySuggestedRefinement by calling from_dict on the json representation + query_suggested_refinement_model = QuerySuggestedRefinement.from_dict(query_suggested_refinement_model_json) + assert query_suggested_refinement_model != False + + # Construct a model instance of QuerySuggestedRefinement by calling from_dict on the json representation + query_suggested_refinement_model_dict = QuerySuggestedRefinement.from_dict(query_suggested_refinement_model_json).__dict__ + query_suggested_refinement_model2 = QuerySuggestedRefinement(**query_suggested_refinement_model_dict) + + # Verify the model instances are equivalent + assert query_suggested_refinement_model == query_suggested_refinement_model2 + + # Convert model instance back to dict and verify no loss of data + query_suggested_refinement_model_json2 = query_suggested_refinement_model.to_dict() + assert query_suggested_refinement_model_json2 == query_suggested_refinement_model_json + + +class TestModel_QueryTableResult: + """ + Test Class for QueryTableResult + """ + + def test_query_table_result_serialization(self): + """ + Test serialization/deserialization for QueryTableResult + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + table_text_location_model = {} # TableTextLocation + table_text_location_model['text'] = 'testString' + table_text_location_model['location'] = table_element_location_model + + table_headers_model = {} # TableHeaders + table_headers_model['cell_id'] = 'testString' + table_headers_model['location'] = table_element_location_model + table_headers_model['text'] = 'testString' + table_headers_model['row_index_begin'] = 26 + table_headers_model['row_index_end'] = 26 + table_headers_model['column_index_begin'] = 26 + table_headers_model['column_index_end'] = 26 + + table_row_headers_model = {} # TableRowHeaders + table_row_headers_model['cell_id'] = 'testString' + table_row_headers_model['location'] = table_element_location_model + table_row_headers_model['text'] = 'testString' + table_row_headers_model['text_normalized'] = 'testString' + table_row_headers_model['row_index_begin'] = 26 + table_row_headers_model['row_index_end'] = 26 + table_row_headers_model['column_index_begin'] = 26 + table_row_headers_model['column_index_end'] = 26 + + table_column_headers_model = {} # TableColumnHeaders + table_column_headers_model['cell_id'] = 'testString' + table_column_headers_model['location'] = table_element_location_model + table_column_headers_model['text'] = 'testString' + table_column_headers_model['text_normalized'] = 'testString' + table_column_headers_model['row_index_begin'] = 26 + table_column_headers_model['row_index_end'] = 26 + table_column_headers_model['column_index_begin'] = 26 + table_column_headers_model['column_index_end'] = 26 + + table_cell_key_model = {} # TableCellKey + table_cell_key_model['cell_id'] = 'testString' + table_cell_key_model['location'] = table_element_location_model + table_cell_key_model['text'] = 'testString' + + table_cell_values_model = {} # TableCellValues + table_cell_values_model['cell_id'] = 'testString' + table_cell_values_model['location'] = table_element_location_model + table_cell_values_model['text'] = 'testString' + + table_key_value_pairs_model = {} # TableKeyValuePairs + table_key_value_pairs_model['key'] = table_cell_key_model + table_key_value_pairs_model['value'] = [table_cell_values_model] + + document_attribute_model = {} # DocumentAttribute + document_attribute_model['type'] = 'testString' + document_attribute_model['text'] = 'testString' + document_attribute_model['location'] = table_element_location_model + + table_body_cells_model = {} # TableBodyCells + table_body_cells_model['cell_id'] = 'testString' + table_body_cells_model['location'] = table_element_location_model + table_body_cells_model['text'] = 'testString' + table_body_cells_model['row_index_begin'] = 26 + table_body_cells_model['row_index_end'] = 26 + table_body_cells_model['column_index_begin'] = 26 + table_body_cells_model['column_index_end'] = 26 + table_body_cells_model['row_header_ids'] = ['testString'] + table_body_cells_model['row_header_texts'] = ['testString'] + table_body_cells_model['row_header_texts_normalized'] = ['testString'] + table_body_cells_model['column_header_ids'] = ['testString'] + table_body_cells_model['column_header_texts'] = ['testString'] + table_body_cells_model['column_header_texts_normalized'] = ['testString'] + table_body_cells_model['attributes'] = [document_attribute_model] + + table_result_table_model = {} # TableResultTable + table_result_table_model['location'] = table_element_location_model + 
table_result_table_model['text'] = 'testString' + table_result_table_model['section_title'] = table_text_location_model + table_result_table_model['title'] = table_text_location_model + table_result_table_model['table_headers'] = [table_headers_model] + table_result_table_model['row_headers'] = [table_row_headers_model] + table_result_table_model['column_headers'] = [table_column_headers_model] + table_result_table_model['key_value_pairs'] = [table_key_value_pairs_model] + table_result_table_model['body_cells'] = [table_body_cells_model] + table_result_table_model['contexts'] = [table_text_location_model] + + # Construct a json representation of a QueryTableResult model + query_table_result_model_json = {} + query_table_result_model_json['table_id'] = 'testString' + query_table_result_model_json['source_document_id'] = 'testString' + query_table_result_model_json['collection_id'] = 'testString' + query_table_result_model_json['table_html'] = 'testString' + query_table_result_model_json['table_html_offset'] = 38 + query_table_result_model_json['table'] = table_result_table_model + + # Construct a model instance of QueryTableResult by calling from_dict on the json representation + query_table_result_model = QueryTableResult.from_dict(query_table_result_model_json) + assert query_table_result_model != False + + # Construct a model instance of QueryTableResult by calling from_dict on the json representation + query_table_result_model_dict = QueryTableResult.from_dict(query_table_result_model_json).__dict__ + query_table_result_model2 = QueryTableResult(**query_table_result_model_dict) + + # Verify the model instances are equivalent + assert query_table_result_model == query_table_result_model2 + + # Convert model instance back to dict and verify no loss of data + query_table_result_model_json2 = query_table_result_model.to_dict() + assert query_table_result_model_json2 == query_table_result_model_json + + +class TestModel_QueryTermAggregationResult: + """ + Test Class for QueryTermAggregationResult + """ + + def test_query_term_aggregation_result_serialization(self): + """ + Test serialization/deserialization for QueryTermAggregationResult + """ + + # Construct a json representation of a QueryTermAggregationResult model + query_term_aggregation_result_model_json = {} + query_term_aggregation_result_model_json['key'] = 'testString' + query_term_aggregation_result_model_json['matching_results'] = 38 + query_term_aggregation_result_model_json['relevancy'] = 72.5 + query_term_aggregation_result_model_json['total_matching_documents'] = 38 + query_term_aggregation_result_model_json['estimated_matching_results'] = 72.5 + query_term_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a model instance of QueryTermAggregationResult by calling from_dict on the json representation + query_term_aggregation_result_model = QueryTermAggregationResult.from_dict(query_term_aggregation_result_model_json) + assert query_term_aggregation_result_model != False + + # Construct a model instance of QueryTermAggregationResult by calling from_dict on the json representation + query_term_aggregation_result_model_dict = QueryTermAggregationResult.from_dict(query_term_aggregation_result_model_json).__dict__ + query_term_aggregation_result_model2 = QueryTermAggregationResult(**query_term_aggregation_result_model_dict) + + # Verify the model instances are equivalent + assert query_term_aggregation_result_model == query_term_aggregation_result_model2 + + # Convert model instance back to 
dict and verify no loss of data + query_term_aggregation_result_model_json2 = query_term_aggregation_result_model.to_dict() + assert query_term_aggregation_result_model_json2 == query_term_aggregation_result_model_json + + +class TestModel_QueryTimesliceAggregationResult: + """ + Test Class for QueryTimesliceAggregationResult + """ + + def test_query_timeslice_aggregation_result_serialization(self): + """ + Test serialization/deserialization for QueryTimesliceAggregationResult + """ + + # Construct a json representation of a QueryTimesliceAggregationResult model + query_timeslice_aggregation_result_model_json = {} + query_timeslice_aggregation_result_model_json['key_as_string'] = 'testString' + query_timeslice_aggregation_result_model_json['key'] = 26 + query_timeslice_aggregation_result_model_json['matching_results'] = 26 + query_timeslice_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a model instance of QueryTimesliceAggregationResult by calling from_dict on the json representation + query_timeslice_aggregation_result_model = QueryTimesliceAggregationResult.from_dict(query_timeslice_aggregation_result_model_json) + assert query_timeslice_aggregation_result_model != False + + # Construct a model instance of QueryTimesliceAggregationResult by calling from_dict on the json representation + query_timeslice_aggregation_result_model_dict = QueryTimesliceAggregationResult.from_dict(query_timeslice_aggregation_result_model_json).__dict__ + query_timeslice_aggregation_result_model2 = QueryTimesliceAggregationResult(**query_timeslice_aggregation_result_model_dict) + + # Verify the model instances are equivalent + assert query_timeslice_aggregation_result_model == query_timeslice_aggregation_result_model2 + + # Convert model instance back to dict and verify no loss of data + query_timeslice_aggregation_result_model_json2 = query_timeslice_aggregation_result_model.to_dict() + assert query_timeslice_aggregation_result_model_json2 == query_timeslice_aggregation_result_model_json + + +class TestModel_QueryTopHitsAggregationResult: + """ + Test Class for QueryTopHitsAggregationResult + """ + + def test_query_top_hits_aggregation_result_serialization(self): + """ + Test serialization/deserialization for QueryTopHitsAggregationResult + """ + + # Construct a json representation of a QueryTopHitsAggregationResult model + query_top_hits_aggregation_result_model_json = {} + query_top_hits_aggregation_result_model_json['matching_results'] = 38 + query_top_hits_aggregation_result_model_json['hits'] = [{'anyKey': 'anyValue'}] + + # Construct a model instance of QueryTopHitsAggregationResult by calling from_dict on the json representation + query_top_hits_aggregation_result_model = QueryTopHitsAggregationResult.from_dict(query_top_hits_aggregation_result_model_json) + assert query_top_hits_aggregation_result_model != False + + # Construct a model instance of QueryTopHitsAggregationResult by calling from_dict on the json representation + query_top_hits_aggregation_result_model_dict = QueryTopHitsAggregationResult.from_dict(query_top_hits_aggregation_result_model_json).__dict__ + query_top_hits_aggregation_result_model2 = QueryTopHitsAggregationResult(**query_top_hits_aggregation_result_model_dict) + + # Verify the model instances are equivalent + assert query_top_hits_aggregation_result_model == query_top_hits_aggregation_result_model2 + + # Convert model instance back to dict and verify no loss of data + query_top_hits_aggregation_result_model_json2 = 
query_top_hits_aggregation_result_model.to_dict() + assert query_top_hits_aggregation_result_model_json2 == query_top_hits_aggregation_result_model_json + + +class TestModel_QueryTopicAggregationResult: + """ + Test Class for QueryTopicAggregationResult + """ + + def test_query_topic_aggregation_result_serialization(self): + """ + Test serialization/deserialization for QueryTopicAggregationResult + """ + + # Construct a json representation of a QueryTopicAggregationResult model + query_topic_aggregation_result_model_json = {} + query_topic_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a model instance of QueryTopicAggregationResult by calling from_dict on the json representation + query_topic_aggregation_result_model = QueryTopicAggregationResult.from_dict(query_topic_aggregation_result_model_json) + assert query_topic_aggregation_result_model != False + + # Construct a model instance of QueryTopicAggregationResult by calling from_dict on the json representation + query_topic_aggregation_result_model_dict = QueryTopicAggregationResult.from_dict(query_topic_aggregation_result_model_json).__dict__ + query_topic_aggregation_result_model2 = QueryTopicAggregationResult(**query_topic_aggregation_result_model_dict) + + # Verify the model instances are equivalent + assert query_topic_aggregation_result_model == query_topic_aggregation_result_model2 + + # Convert model instance back to dict and verify no loss of data + query_topic_aggregation_result_model_json2 = query_topic_aggregation_result_model.to_dict() + assert query_topic_aggregation_result_model_json2 == query_topic_aggregation_result_model_json + + +class TestModel_QueryTrendAggregationResult: + """ + Test Class for QueryTrendAggregationResult + """ + + def test_query_trend_aggregation_result_serialization(self): + """ + Test serialization/deserialization for QueryTrendAggregationResult + """ + + # Construct a json representation of a QueryTrendAggregationResult model + query_trend_aggregation_result_model_json = {} + query_trend_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a model instance of QueryTrendAggregationResult by calling from_dict on the json representation + query_trend_aggregation_result_model = QueryTrendAggregationResult.from_dict(query_trend_aggregation_result_model_json) + assert query_trend_aggregation_result_model != False + + # Construct a model instance of QueryTrendAggregationResult by calling from_dict on the json representation + query_trend_aggregation_result_model_dict = QueryTrendAggregationResult.from_dict(query_trend_aggregation_result_model_json).__dict__ + query_trend_aggregation_result_model2 = QueryTrendAggregationResult(**query_trend_aggregation_result_model_dict) + + # Verify the model instances are equivalent + assert query_trend_aggregation_result_model == query_trend_aggregation_result_model2 + + # Convert model instance back to dict and verify no loss of data + query_trend_aggregation_result_model_json2 = query_trend_aggregation_result_model.to_dict() + assert query_trend_aggregation_result_model_json2 == query_trend_aggregation_result_model_json + + +class TestModel_ResultPassageAnswer: + """ + Test Class for ResultPassageAnswer + """ + + def test_result_passage_answer_serialization(self): + """ + Test serialization/deserialization for ResultPassageAnswer + """ + + # Construct a json representation of a ResultPassageAnswer model + result_passage_answer_model_json = {} + 
result_passage_answer_model_json['answer_text'] = 'testString' + result_passage_answer_model_json['start_offset'] = 38 + result_passage_answer_model_json['end_offset'] = 38 + result_passage_answer_model_json['confidence'] = 0 + + # Construct a model instance of ResultPassageAnswer by calling from_dict on the json representation + result_passage_answer_model = ResultPassageAnswer.from_dict(result_passage_answer_model_json) + assert result_passage_answer_model != False + + # Construct a model instance of ResultPassageAnswer by calling from_dict on the json representation + result_passage_answer_model_dict = ResultPassageAnswer.from_dict(result_passage_answer_model_json).__dict__ + result_passage_answer_model2 = ResultPassageAnswer(**result_passage_answer_model_dict) + + # Verify the model instances are equivalent + assert result_passage_answer_model == result_passage_answer_model2 + + # Convert model instance back to dict and verify no loss of data + result_passage_answer_model_json2 = result_passage_answer_model.to_dict() + assert result_passage_answer_model_json2 == result_passage_answer_model_json + + +class TestModel_RetrievalDetails: + """ + Test Class for RetrievalDetails + """ + + def test_retrieval_details_serialization(self): + """ + Test serialization/deserialization for RetrievalDetails + """ + + # Construct a json representation of a RetrievalDetails model + retrieval_details_model_json = {} + retrieval_details_model_json['document_retrieval_strategy'] = 'untrained' + + # Construct a model instance of RetrievalDetails by calling from_dict on the json representation + retrieval_details_model = RetrievalDetails.from_dict(retrieval_details_model_json) + assert retrieval_details_model != False + + # Construct a model instance of RetrievalDetails by calling from_dict on the json representation + retrieval_details_model_dict = RetrievalDetails.from_dict(retrieval_details_model_json).__dict__ + retrieval_details_model2 = RetrievalDetails(**retrieval_details_model_dict) + + # Verify the model instances are equivalent + assert retrieval_details_model == retrieval_details_model2 + + # Convert model instance back to dict and verify no loss of data + retrieval_details_model_json2 = retrieval_details_model.to_dict() + assert retrieval_details_model_json2 == retrieval_details_model_json + + +class TestModel_StopWordList: + """ + Test Class for StopWordList + """ + + def test_stop_word_list_serialization(self): + """ + Test serialization/deserialization for StopWordList + """ + + # Construct a json representation of a StopWordList model + stop_word_list_model_json = {} + stop_word_list_model_json['stopwords'] = ['testString'] + + # Construct a model instance of StopWordList by calling from_dict on the json representation + stop_word_list_model = StopWordList.from_dict(stop_word_list_model_json) + assert stop_word_list_model != False + + # Construct a model instance of StopWordList by calling from_dict on the json representation + stop_word_list_model_dict = StopWordList.from_dict(stop_word_list_model_json).__dict__ + stop_word_list_model2 = StopWordList(**stop_word_list_model_dict) + + # Verify the model instances are equivalent + assert stop_word_list_model == stop_word_list_model2 + + # Convert model instance back to dict and verify no loss of data + stop_word_list_model_json2 = stop_word_list_model.to_dict() + assert stop_word_list_model_json2 == stop_word_list_model_json + + +class TestModel_TableBodyCells: + """ + Test Class for TableBodyCells + """ + + def 
test_table_body_cells_serialization(self): + """ + Test serialization/deserialization for TableBodyCells + """ + + # Construct dict forms of any model objects needed in order to build this model. + + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + document_attribute_model = {} # DocumentAttribute + document_attribute_model['type'] = 'testString' + document_attribute_model['text'] = 'testString' + document_attribute_model['location'] = table_element_location_model + + # Construct a json representation of a TableBodyCells model + table_body_cells_model_json = {} + table_body_cells_model_json['cell_id'] = 'testString' + table_body_cells_model_json['location'] = table_element_location_model + table_body_cells_model_json['text'] = 'testString' + table_body_cells_model_json['row_index_begin'] = 26 + table_body_cells_model_json['row_index_end'] = 26 + table_body_cells_model_json['column_index_begin'] = 26 + table_body_cells_model_json['column_index_end'] = 26 + table_body_cells_model_json['row_header_ids'] = ['testString'] + table_body_cells_model_json['row_header_texts'] = ['testString'] + table_body_cells_model_json['row_header_texts_normalized'] = ['testString'] + table_body_cells_model_json['column_header_ids'] = ['testString'] + table_body_cells_model_json['column_header_texts'] = ['testString'] + table_body_cells_model_json['column_header_texts_normalized'] = ['testString'] + table_body_cells_model_json['attributes'] = [document_attribute_model] + + # Construct a model instance of TableBodyCells by calling from_dict on the json representation + table_body_cells_model = TableBodyCells.from_dict(table_body_cells_model_json) + assert table_body_cells_model != False + + # Construct a model instance of TableBodyCells by calling from_dict on the json representation + table_body_cells_model_dict = TableBodyCells.from_dict(table_body_cells_model_json).__dict__ + table_body_cells_model2 = TableBodyCells(**table_body_cells_model_dict) + + # Verify the model instances are equivalent + assert table_body_cells_model == table_body_cells_model2 + + # Convert model instance back to dict and verify no loss of data + table_body_cells_model_json2 = table_body_cells_model.to_dict() + assert table_body_cells_model_json2 == table_body_cells_model_json + + +class TestModel_TableCellKey: + """ + Test Class for TableCellKey + """ + + def test_table_cell_key_serialization(self): + """ + Test serialization/deserialization for TableCellKey + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + # Construct a json representation of a TableCellKey model + table_cell_key_model_json = {} + table_cell_key_model_json['cell_id'] = 'testString' + table_cell_key_model_json['location'] = table_element_location_model + table_cell_key_model_json['text'] = 'testString' + + # Construct a model instance of TableCellKey by calling from_dict on the json representation + table_cell_key_model = TableCellKey.from_dict(table_cell_key_model_json) + assert table_cell_key_model != False + + # Construct a model instance of TableCellKey by calling from_dict on the json representation + table_cell_key_model_dict = TableCellKey.from_dict(table_cell_key_model_json).__dict__ + table_cell_key_model2 = TableCellKey(**table_cell_key_model_dict) + + # Verify the model instances are equivalent + assert table_cell_key_model == table_cell_key_model2 + + # Convert model instance back to dict and verify no loss of data + table_cell_key_model_json2 = table_cell_key_model.to_dict() + assert table_cell_key_model_json2 == table_cell_key_model_json + + +class TestModel_TableCellValues: + """ + Test Class for TableCellValues + """ + + def test_table_cell_values_serialization(self): + """ + Test serialization/deserialization for TableCellValues + """ + + # Construct dict forms of any model objects needed in order to build this model. + + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + # Construct a json representation of a TableCellValues model + table_cell_values_model_json = {} + table_cell_values_model_json['cell_id'] = 'testString' + table_cell_values_model_json['location'] = table_element_location_model + table_cell_values_model_json['text'] = 'testString' + + # Construct a model instance of TableCellValues by calling from_dict on the json representation + table_cell_values_model = TableCellValues.from_dict(table_cell_values_model_json) + assert table_cell_values_model != False + + # Construct a model instance of TableCellValues by calling from_dict on the json representation + table_cell_values_model_dict = TableCellValues.from_dict(table_cell_values_model_json).__dict__ + table_cell_values_model2 = TableCellValues(**table_cell_values_model_dict) + + # Verify the model instances are equivalent + assert table_cell_values_model == table_cell_values_model2 + + # Convert model instance back to dict and verify no loss of data + table_cell_values_model_json2 = table_cell_values_model.to_dict() + assert table_cell_values_model_json2 == table_cell_values_model_json + + +class TestModel_TableColumnHeaders: + """ + Test Class for TableColumnHeaders + """ + + def test_table_column_headers_serialization(self): + """ + Test serialization/deserialization for TableColumnHeaders + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + # Construct a json representation of a TableColumnHeaders model + table_column_headers_model_json = {} + table_column_headers_model_json['cell_id'] = 'testString' + table_column_headers_model_json['location'] = table_element_location_model + table_column_headers_model_json['text'] = 'testString' + table_column_headers_model_json['text_normalized'] = 'testString' + table_column_headers_model_json['row_index_begin'] = 26 + table_column_headers_model_json['row_index_end'] = 26 + table_column_headers_model_json['column_index_begin'] = 26 + table_column_headers_model_json['column_index_end'] = 26 + + # Construct a model instance of TableColumnHeaders by calling from_dict on the json representation + table_column_headers_model = TableColumnHeaders.from_dict(table_column_headers_model_json) + assert table_column_headers_model != False + + # Construct a model instance of TableColumnHeaders by calling from_dict on the json representation + table_column_headers_model_dict = TableColumnHeaders.from_dict(table_column_headers_model_json).__dict__ + table_column_headers_model2 = TableColumnHeaders(**table_column_headers_model_dict) + + # Verify the model instances are equivalent + assert table_column_headers_model == table_column_headers_model2 + + # Convert model instance back to dict and verify no loss of data + table_column_headers_model_json2 = table_column_headers_model.to_dict() + assert table_column_headers_model_json2 == table_column_headers_model_json + + +class TestModel_TableElementLocation: + """ + Test Class for TableElementLocation + """ + + def test_table_element_location_serialization(self): + """ + Test serialization/deserialization for TableElementLocation + """ + + # Construct a json representation of a TableElementLocation model + table_element_location_model_json = {} + table_element_location_model_json['begin'] = 26 + table_element_location_model_json['end'] = 26 + + # Construct a model instance of TableElementLocation by calling from_dict on the json representation + table_element_location_model = TableElementLocation.from_dict(table_element_location_model_json) + assert table_element_location_model != False + + # Construct a model instance of TableElementLocation by calling from_dict on the json representation + table_element_location_model_dict = TableElementLocation.from_dict(table_element_location_model_json).__dict__ + table_element_location_model2 = TableElementLocation(**table_element_location_model_dict) + + # Verify the model instances are equivalent + assert table_element_location_model == table_element_location_model2 + + # Convert model instance back to dict and verify no loss of data + table_element_location_model_json2 = table_element_location_model.to_dict() + assert table_element_location_model_json2 == table_element_location_model_json + + +class TestModel_TableHeaders: + """ + Test Class for TableHeaders + """ + + def test_table_headers_serialization(self): + """ + Test serialization/deserialization for TableHeaders + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + # Construct a json representation of a TableHeaders model + table_headers_model_json = {} + table_headers_model_json['cell_id'] = 'testString' + table_headers_model_json['location'] = table_element_location_model + table_headers_model_json['text'] = 'testString' + table_headers_model_json['row_index_begin'] = 26 + table_headers_model_json['row_index_end'] = 26 + table_headers_model_json['column_index_begin'] = 26 + table_headers_model_json['column_index_end'] = 26 + + # Construct a model instance of TableHeaders by calling from_dict on the json representation + table_headers_model = TableHeaders.from_dict(table_headers_model_json) + assert table_headers_model != False + + # Construct a model instance of TableHeaders by calling from_dict on the json representation + table_headers_model_dict = TableHeaders.from_dict(table_headers_model_json).__dict__ + table_headers_model2 = TableHeaders(**table_headers_model_dict) + + # Verify the model instances are equivalent + assert table_headers_model == table_headers_model2 + + # Convert model instance back to dict and verify no loss of data + table_headers_model_json2 = table_headers_model.to_dict() + assert table_headers_model_json2 == table_headers_model_json + + +class TestModel_TableKeyValuePairs: + """ + Test Class for TableKeyValuePairs + """ + + def test_table_key_value_pairs_serialization(self): + """ + Test serialization/deserialization for TableKeyValuePairs + """ + + # Construct dict forms of any model objects needed in order to build this model. + + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + table_cell_key_model = {} # TableCellKey + table_cell_key_model['cell_id'] = 'testString' + table_cell_key_model['location'] = table_element_location_model + table_cell_key_model['text'] = 'testString' + + table_cell_values_model = {} # TableCellValues + table_cell_values_model['cell_id'] = 'testString' + table_cell_values_model['location'] = table_element_location_model + table_cell_values_model['text'] = 'testString' + + # Construct a json representation of a TableKeyValuePairs model + table_key_value_pairs_model_json = {} + table_key_value_pairs_model_json['key'] = table_cell_key_model + table_key_value_pairs_model_json['value'] = [table_cell_values_model] + + # Construct a model instance of TableKeyValuePairs by calling from_dict on the json representation + table_key_value_pairs_model = TableKeyValuePairs.from_dict(table_key_value_pairs_model_json) + assert table_key_value_pairs_model != False + + # Construct a model instance of TableKeyValuePairs by calling from_dict on the json representation + table_key_value_pairs_model_dict = TableKeyValuePairs.from_dict(table_key_value_pairs_model_json).__dict__ + table_key_value_pairs_model2 = TableKeyValuePairs(**table_key_value_pairs_model_dict) + + # Verify the model instances are equivalent + assert table_key_value_pairs_model == table_key_value_pairs_model2 + + # Convert model instance back to dict and verify no loss of data + table_key_value_pairs_model_json2 = table_key_value_pairs_model.to_dict() + assert table_key_value_pairs_model_json2 == table_key_value_pairs_model_json + + +class TestModel_TableResultTable: + """ + Test Class for TableResultTable + """ + + def test_table_result_table_serialization(self): + """ + Test 
serialization/deserialization for TableResultTable + """ + + # Construct dict forms of any model objects needed in order to build this model. + + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + table_text_location_model = {} # TableTextLocation + table_text_location_model['text'] = 'testString' + table_text_location_model['location'] = table_element_location_model + + table_headers_model = {} # TableHeaders + table_headers_model['cell_id'] = 'testString' + table_headers_model['location'] = table_element_location_model + table_headers_model['text'] = 'testString' + table_headers_model['row_index_begin'] = 26 + table_headers_model['row_index_end'] = 26 + table_headers_model['column_index_begin'] = 26 + table_headers_model['column_index_end'] = 26 + + table_row_headers_model = {} # TableRowHeaders + table_row_headers_model['cell_id'] = 'testString' + table_row_headers_model['location'] = table_element_location_model + table_row_headers_model['text'] = 'testString' + table_row_headers_model['text_normalized'] = 'testString' + table_row_headers_model['row_index_begin'] = 26 + table_row_headers_model['row_index_end'] = 26 + table_row_headers_model['column_index_begin'] = 26 + table_row_headers_model['column_index_end'] = 26 + + table_column_headers_model = {} # TableColumnHeaders + table_column_headers_model['cell_id'] = 'testString' + table_column_headers_model['location'] = table_element_location_model + table_column_headers_model['text'] = 'testString' + table_column_headers_model['text_normalized'] = 'testString' + table_column_headers_model['row_index_begin'] = 26 + table_column_headers_model['row_index_end'] = 26 + table_column_headers_model['column_index_begin'] = 26 + table_column_headers_model['column_index_end'] = 26 + + table_cell_key_model = {} # TableCellKey + table_cell_key_model['cell_id'] = 'testString' + table_cell_key_model['location'] = table_element_location_model + table_cell_key_model['text'] = 'testString' + + table_cell_values_model = {} # TableCellValues + table_cell_values_model['cell_id'] = 'testString' + table_cell_values_model['location'] = table_element_location_model + table_cell_values_model['text'] = 'testString' + + table_key_value_pairs_model = {} # TableKeyValuePairs + table_key_value_pairs_model['key'] = table_cell_key_model + table_key_value_pairs_model['value'] = [table_cell_values_model] + + document_attribute_model = {} # DocumentAttribute + document_attribute_model['type'] = 'testString' + document_attribute_model['text'] = 'testString' + document_attribute_model['location'] = table_element_location_model + + table_body_cells_model = {} # TableBodyCells + table_body_cells_model['cell_id'] = 'testString' + table_body_cells_model['location'] = table_element_location_model + table_body_cells_model['text'] = 'testString' + table_body_cells_model['row_index_begin'] = 26 + table_body_cells_model['row_index_end'] = 26 + table_body_cells_model['column_index_begin'] = 26 + table_body_cells_model['column_index_end'] = 26 + table_body_cells_model['row_header_ids'] = ['testString'] + table_body_cells_model['row_header_texts'] = ['testString'] + table_body_cells_model['row_header_texts_normalized'] = ['testString'] + table_body_cells_model['column_header_ids'] = ['testString'] + table_body_cells_model['column_header_texts'] = ['testString'] + table_body_cells_model['column_header_texts_normalized'] = ['testString'] + table_body_cells_model['attributes'] = 
[document_attribute_model] + + # Construct a json representation of a TableResultTable model + table_result_table_model_json = {} + table_result_table_model_json['location'] = table_element_location_model + table_result_table_model_json['text'] = 'testString' + table_result_table_model_json['section_title'] = table_text_location_model + table_result_table_model_json['title'] = table_text_location_model + table_result_table_model_json['table_headers'] = [table_headers_model] + table_result_table_model_json['row_headers'] = [table_row_headers_model] + table_result_table_model_json['column_headers'] = [table_column_headers_model] + table_result_table_model_json['key_value_pairs'] = [table_key_value_pairs_model] + table_result_table_model_json['body_cells'] = [table_body_cells_model] + table_result_table_model_json['contexts'] = [table_text_location_model] + + # Construct a model instance of TableResultTable by calling from_dict on the json representation + table_result_table_model = TableResultTable.from_dict(table_result_table_model_json) + assert table_result_table_model != False + + # Construct a model instance of TableResultTable by calling from_dict on the json representation + table_result_table_model_dict = TableResultTable.from_dict(table_result_table_model_json).__dict__ + table_result_table_model2 = TableResultTable(**table_result_table_model_dict) + + # Verify the model instances are equivalent + assert table_result_table_model == table_result_table_model2 + + # Convert model instance back to dict and verify no loss of data + table_result_table_model_json2 = table_result_table_model.to_dict() + assert table_result_table_model_json2 == table_result_table_model_json + + +class TestModel_TableRowHeaders: + """ + Test Class for TableRowHeaders + """ + + def test_table_row_headers_serialization(self): + """ + Test serialization/deserialization for TableRowHeaders + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + # Construct a json representation of a TableRowHeaders model + table_row_headers_model_json = {} + table_row_headers_model_json['cell_id'] = 'testString' + table_row_headers_model_json['location'] = table_element_location_model + table_row_headers_model_json['text'] = 'testString' + table_row_headers_model_json['text_normalized'] = 'testString' + table_row_headers_model_json['row_index_begin'] = 26 + table_row_headers_model_json['row_index_end'] = 26 + table_row_headers_model_json['column_index_begin'] = 26 + table_row_headers_model_json['column_index_end'] = 26 + + # Construct a model instance of TableRowHeaders by calling from_dict on the json representation + table_row_headers_model = TableRowHeaders.from_dict(table_row_headers_model_json) + assert table_row_headers_model != False + + # Construct a model instance of TableRowHeaders by calling from_dict on the json representation + table_row_headers_model_dict = TableRowHeaders.from_dict(table_row_headers_model_json).__dict__ + table_row_headers_model2 = TableRowHeaders(**table_row_headers_model_dict) + + # Verify the model instances are equivalent + assert table_row_headers_model == table_row_headers_model2 + + # Convert model instance back to dict and verify no loss of data + table_row_headers_model_json2 = table_row_headers_model.to_dict() + assert table_row_headers_model_json2 == table_row_headers_model_json + + +class TestModel_TableTextLocation: + """ + Test Class for TableTextLocation + """ + + def test_table_text_location_serialization(self): + """ + Test serialization/deserialization for TableTextLocation + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + table_element_location_model = {} # TableElementLocation + table_element_location_model['begin'] = 26 + table_element_location_model['end'] = 26 + + # Construct a json representation of a TableTextLocation model + table_text_location_model_json = {} + table_text_location_model_json['text'] = 'testString' + table_text_location_model_json['location'] = table_element_location_model + + # Construct a model instance of TableTextLocation by calling from_dict on the json representation + table_text_location_model = TableTextLocation.from_dict(table_text_location_model_json) + assert table_text_location_model != False + + # Construct a model instance of TableTextLocation by calling from_dict on the json representation + table_text_location_model_dict = TableTextLocation.from_dict(table_text_location_model_json).__dict__ + table_text_location_model2 = TableTextLocation(**table_text_location_model_dict) + + # Verify the model instances are equivalent + assert table_text_location_model == table_text_location_model2 + + # Convert model instance back to dict and verify no loss of data + table_text_location_model_json2 = table_text_location_model.to_dict() + assert table_text_location_model_json2 == table_text_location_model_json + + +class TestModel_TrainingExample: + """ + Test Class for TrainingExample + """ + + def test_training_example_serialization(self): + """ + Test serialization/deserialization for TrainingExample + """ + + # Construct a json representation of a TrainingExample model + training_example_model_json = {} + training_example_model_json['document_id'] = 'testString' + training_example_model_json['collection_id'] = 'testString' + training_example_model_json['relevance'] = 38 + + # Construct a model instance of TrainingExample by calling from_dict on the json representation + training_example_model = TrainingExample.from_dict(training_example_model_json) + assert training_example_model != False + + # Construct a model instance of TrainingExample by calling from_dict on the json representation + training_example_model_dict = TrainingExample.from_dict(training_example_model_json).__dict__ + training_example_model2 = TrainingExample(**training_example_model_dict) + + # Verify the model instances are equivalent + assert training_example_model == training_example_model2 + + # Convert model instance back to dict and verify no loss of data + training_example_model_json2 = training_example_model.to_dict() + assert training_example_model_json2 == training_example_model_json + + +class TestModel_TrainingQuery: + """ + Test Class for TrainingQuery + """ + + def test_training_query_serialization(self): + """ + Test serialization/deserialization for TrainingQuery + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + training_example_model = {} # TrainingExample + training_example_model['document_id'] = 'testString' + training_example_model['collection_id'] = 'testString' + training_example_model['relevance'] = 38 + + # Construct a json representation of a TrainingQuery model + training_query_model_json = {} + training_query_model_json['natural_language_query'] = 'testString' + training_query_model_json['filter'] = 'testString' + training_query_model_json['examples'] = [training_example_model] + + # Construct a model instance of TrainingQuery by calling from_dict on the json representation + training_query_model = TrainingQuery.from_dict(training_query_model_json) + assert training_query_model != False + + # Construct a model instance of TrainingQuery by calling from_dict on the json representation + training_query_model_dict = TrainingQuery.from_dict(training_query_model_json).__dict__ + training_query_model2 = TrainingQuery(**training_query_model_dict) + + # Verify the model instances are equivalent + assert training_query_model == training_query_model2 + + # Convert model instance back to dict and verify no loss of data + training_query_model_json2 = training_query_model.to_dict() + assert training_query_model_json2 == training_query_model_json + + +class TestModel_TrainingQuerySet: + """ + Test Class for TrainingQuerySet + """ + + def test_training_query_set_serialization(self): + """ + Test serialization/deserialization for TrainingQuerySet + """ + + # Construct dict forms of any model objects needed in order to build this model. + + training_example_model = {} # TrainingExample + training_example_model['document_id'] = 'testString' + training_example_model['collection_id'] = 'testString' + training_example_model['relevance'] = 38 + + training_query_model = {} # TrainingQuery + training_query_model['natural_language_query'] = 'testString' + training_query_model['filter'] = 'testString' + training_query_model['examples'] = [training_example_model] + + # Construct a json representation of a TrainingQuerySet model + training_query_set_model_json = {} + training_query_set_model_json['queries'] = [training_query_model] + + # Construct a model instance of TrainingQuerySet by calling from_dict on the json representation + training_query_set_model = TrainingQuerySet.from_dict(training_query_set_model_json) + assert training_query_set_model != False + + # Construct a model instance of TrainingQuerySet by calling from_dict on the json representation + training_query_set_model_dict = TrainingQuerySet.from_dict(training_query_set_model_json).__dict__ + training_query_set_model2 = TrainingQuerySet(**training_query_set_model_dict) + + # Verify the model instances are equivalent + assert training_query_set_model == training_query_set_model2 + + # Convert model instance back to dict and verify no loss of data + training_query_set_model_json2 = training_query_set_model.to_dict() + assert training_query_set_model_json2 == training_query_set_model_json + + +class TestModel_UpdateDocumentClassifier: + """ + Test Class for UpdateDocumentClassifier + """ + + def test_update_document_classifier_serialization(self): + """ + Test serialization/deserialization for UpdateDocumentClassifier + """ + + # Construct a json representation of a UpdateDocumentClassifier model + update_document_classifier_model_json = {} + update_document_classifier_model_json['name'] = 'testString' + update_document_classifier_model_json['description'] = 'testString' + + # Construct a model instance of UpdateDocumentClassifier by calling 
from_dict on the json representation + update_document_classifier_model = UpdateDocumentClassifier.from_dict(update_document_classifier_model_json) + assert update_document_classifier_model != False + + # Construct a model instance of UpdateDocumentClassifier by calling from_dict on the json representation + update_document_classifier_model_dict = UpdateDocumentClassifier.from_dict(update_document_classifier_model_json).__dict__ + update_document_classifier_model2 = UpdateDocumentClassifier(**update_document_classifier_model_dict) + + # Verify the model instances are equivalent + assert update_document_classifier_model == update_document_classifier_model2 + + # Convert model instance back to dict and verify no loss of data + update_document_classifier_model_json2 = update_document_classifier_model.to_dict() + assert update_document_classifier_model_json2 == update_document_classifier_model_json + + +class TestModel_WebhookHeader: + """ + Test Class for WebhookHeader + """ + + def test_webhook_header_serialization(self): + """ + Test serialization/deserialization for WebhookHeader + """ + + # Construct a json representation of a WebhookHeader model + webhook_header_model_json = {} + webhook_header_model_json['name'] = 'testString' + webhook_header_model_json['value'] = 'testString' + + # Construct a model instance of WebhookHeader by calling from_dict on the json representation + webhook_header_model = WebhookHeader.from_dict(webhook_header_model_json) + assert webhook_header_model != False + + # Construct a model instance of WebhookHeader by calling from_dict on the json representation + webhook_header_model_dict = WebhookHeader.from_dict(webhook_header_model_json).__dict__ + webhook_header_model2 = WebhookHeader(**webhook_header_model_dict) + + # Verify the model instances are equivalent + assert webhook_header_model == webhook_header_model2 + + # Convert model instance back to dict and verify no loss of data + webhook_header_model_json2 = webhook_header_model.to_dict() + assert webhook_header_model_json2 == webhook_header_model_json + + +class TestModel_PullBatchesResponse: + """ + Test Class for PullBatchesResponse + """ + + def test_pull_batches_response_serialization(self): + """ + Test serialization/deserialization for PullBatchesResponse + """ + + # Construct a json representation of a PullBatchesResponse model + pull_batches_response_model_json = {} + pull_batches_response_model_json['file'] = 'testString' + + # Construct a model instance of PullBatchesResponse by calling from_dict on the json representation + pull_batches_response_model = PullBatchesResponse.from_dict(pull_batches_response_model_json) + assert pull_batches_response_model != False + + # Construct a model instance of PullBatchesResponse by calling from_dict on the json representation + pull_batches_response_model_dict = PullBatchesResponse.from_dict(pull_batches_response_model_json).__dict__ + pull_batches_response_model2 = PullBatchesResponse(**pull_batches_response_model_dict) + + # Verify the model instances are equivalent + assert pull_batches_response_model == pull_batches_response_model2 + + # Convert model instance back to dict and verify no loss of data + pull_batches_response_model_json2 = pull_batches_response_model.to_dict() + assert pull_batches_response_model_json2 == pull_batches_response_model_json + + +class TestModel_QueryAggregationQueryCalculationAggregation: + """ + Test Class for QueryAggregationQueryCalculationAggregation + """ + + def 
test_query_aggregation_query_calculation_aggregation_serialization(self): + """ + Test serialization/deserialization for QueryAggregationQueryCalculationAggregation + """ + + # Construct a json representation of a QueryAggregationQueryCalculationAggregation model + query_aggregation_query_calculation_aggregation_model_json = {} + query_aggregation_query_calculation_aggregation_model_json['type'] = 'unique_count' + query_aggregation_query_calculation_aggregation_model_json['field'] = 'testString' + query_aggregation_query_calculation_aggregation_model_json['value'] = 72.5 + + # Construct a model instance of QueryAggregationQueryCalculationAggregation by calling from_dict on the json representation + query_aggregation_query_calculation_aggregation_model = QueryAggregationQueryCalculationAggregation.from_dict(query_aggregation_query_calculation_aggregation_model_json) + assert query_aggregation_query_calculation_aggregation_model != False + + # Construct a model instance of QueryAggregationQueryCalculationAggregation by calling from_dict on the json representation + query_aggregation_query_calculation_aggregation_model_dict = QueryAggregationQueryCalculationAggregation.from_dict(query_aggregation_query_calculation_aggregation_model_json).__dict__ + query_aggregation_query_calculation_aggregation_model2 = QueryAggregationQueryCalculationAggregation(**query_aggregation_query_calculation_aggregation_model_dict) + + # Verify the model instances are equivalent + assert query_aggregation_query_calculation_aggregation_model == query_aggregation_query_calculation_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + query_aggregation_query_calculation_aggregation_model_json2 = query_aggregation_query_calculation_aggregation_model.to_dict() + assert query_aggregation_query_calculation_aggregation_model_json2 == query_aggregation_query_calculation_aggregation_model_json + + +class TestModel_QueryAggregationQueryFilterAggregation: + """ + Test Class for QueryAggregationQueryFilterAggregation + """ + + def test_query_aggregation_query_filter_aggregation_serialization(self): + """ + Test serialization/deserialization for QueryAggregationQueryFilterAggregation + """ + + # Construct a json representation of a QueryAggregationQueryFilterAggregation model + query_aggregation_query_filter_aggregation_model_json = {} + query_aggregation_query_filter_aggregation_model_json['type'] = 'filter' + query_aggregation_query_filter_aggregation_model_json['match'] = 'testString' + query_aggregation_query_filter_aggregation_model_json['matching_results'] = 26 + query_aggregation_query_filter_aggregation_model_json['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a model instance of QueryAggregationQueryFilterAggregation by calling from_dict on the json representation + query_aggregation_query_filter_aggregation_model = QueryAggregationQueryFilterAggregation.from_dict(query_aggregation_query_filter_aggregation_model_json) + assert query_aggregation_query_filter_aggregation_model != False + + # Construct a model instance of QueryAggregationQueryFilterAggregation by calling from_dict on the json representation + query_aggregation_query_filter_aggregation_model_dict = QueryAggregationQueryFilterAggregation.from_dict(query_aggregation_query_filter_aggregation_model_json).__dict__ + query_aggregation_query_filter_aggregation_model2 = QueryAggregationQueryFilterAggregation(**query_aggregation_query_filter_aggregation_model_dict) + + # Verify the model instances are equivalent + 
assert query_aggregation_query_filter_aggregation_model == query_aggregation_query_filter_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + query_aggregation_query_filter_aggregation_model_json2 = query_aggregation_query_filter_aggregation_model.to_dict() + assert query_aggregation_query_filter_aggregation_model_json2 == query_aggregation_query_filter_aggregation_model_json + + +class TestModel_QueryAggregationQueryGroupByAggregation: + """ + Test Class for QueryAggregationQueryGroupByAggregation + """ + + def test_query_aggregation_query_group_by_aggregation_serialization(self): + """ + Test serialization/deserialization for QueryAggregationQueryGroupByAggregation + """ + + # Construct dict forms of any model objects needed in order to build this model. + + query_group_by_aggregation_result_model = {} # QueryGroupByAggregationResult + query_group_by_aggregation_result_model['key'] = 'testString' + query_group_by_aggregation_result_model['matching_results'] = 38 + query_group_by_aggregation_result_model['relevancy'] = 72.5 + query_group_by_aggregation_result_model['total_matching_documents'] = 38 + query_group_by_aggregation_result_model['estimated_matching_results'] = 72.5 + query_group_by_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a json representation of a QueryAggregationQueryGroupByAggregation model + query_aggregation_query_group_by_aggregation_model_json = {} + query_aggregation_query_group_by_aggregation_model_json['type'] = 'group_by' + query_aggregation_query_group_by_aggregation_model_json['results'] = [query_group_by_aggregation_result_model] + + # Construct a model instance of QueryAggregationQueryGroupByAggregation by calling from_dict on the json representation + query_aggregation_query_group_by_aggregation_model = QueryAggregationQueryGroupByAggregation.from_dict(query_aggregation_query_group_by_aggregation_model_json) + assert query_aggregation_query_group_by_aggregation_model != False + + # Construct a model instance of QueryAggregationQueryGroupByAggregation by calling from_dict on the json representation + query_aggregation_query_group_by_aggregation_model_dict = QueryAggregationQueryGroupByAggregation.from_dict(query_aggregation_query_group_by_aggregation_model_json).__dict__ + query_aggregation_query_group_by_aggregation_model2 = QueryAggregationQueryGroupByAggregation(**query_aggregation_query_group_by_aggregation_model_dict) + + # Verify the model instances are equivalent + assert query_aggregation_query_group_by_aggregation_model == query_aggregation_query_group_by_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + query_aggregation_query_group_by_aggregation_model_json2 = query_aggregation_query_group_by_aggregation_model.to_dict() + assert query_aggregation_query_group_by_aggregation_model_json2 == query_aggregation_query_group_by_aggregation_model_json + + +class TestModel_QueryAggregationQueryHistogramAggregation: + """ + Test Class for QueryAggregationQueryHistogramAggregation + """ + + def test_query_aggregation_query_histogram_aggregation_serialization(self): + """ + Test serialization/deserialization for QueryAggregationQueryHistogramAggregation + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + query_histogram_aggregation_result_model = {} # QueryHistogramAggregationResult + query_histogram_aggregation_result_model['key'] = 26 + query_histogram_aggregation_result_model['matching_results'] = 38 + query_histogram_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a json representation of a QueryAggregationQueryHistogramAggregation model + query_aggregation_query_histogram_aggregation_model_json = {} + query_aggregation_query_histogram_aggregation_model_json['type'] = 'histogram' + query_aggregation_query_histogram_aggregation_model_json['field'] = 'testString' + query_aggregation_query_histogram_aggregation_model_json['interval'] = 38 + query_aggregation_query_histogram_aggregation_model_json['name'] = 'testString' + query_aggregation_query_histogram_aggregation_model_json['results'] = [query_histogram_aggregation_result_model] + + # Construct a model instance of QueryAggregationQueryHistogramAggregation by calling from_dict on the json representation + query_aggregation_query_histogram_aggregation_model = QueryAggregationQueryHistogramAggregation.from_dict(query_aggregation_query_histogram_aggregation_model_json) + assert query_aggregation_query_histogram_aggregation_model != False + + # Construct a model instance of QueryAggregationQueryHistogramAggregation by calling from_dict on the json representation + query_aggregation_query_histogram_aggregation_model_dict = QueryAggregationQueryHistogramAggregation.from_dict(query_aggregation_query_histogram_aggregation_model_json).__dict__ + query_aggregation_query_histogram_aggregation_model2 = QueryAggregationQueryHistogramAggregation(**query_aggregation_query_histogram_aggregation_model_dict) + + # Verify the model instances are equivalent + assert query_aggregation_query_histogram_aggregation_model == query_aggregation_query_histogram_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + query_aggregation_query_histogram_aggregation_model_json2 = query_aggregation_query_histogram_aggregation_model.to_dict() + assert query_aggregation_query_histogram_aggregation_model_json2 == query_aggregation_query_histogram_aggregation_model_json + + +class TestModel_QueryAggregationQueryNestedAggregation: + """ + Test Class for QueryAggregationQueryNestedAggregation + """ + + def test_query_aggregation_query_nested_aggregation_serialization(self): + """ + Test serialization/deserialization for QueryAggregationQueryNestedAggregation + """ + + # Construct a json representation of a QueryAggregationQueryNestedAggregation model + query_aggregation_query_nested_aggregation_model_json = {} + query_aggregation_query_nested_aggregation_model_json['type'] = 'nested' + query_aggregation_query_nested_aggregation_model_json['path'] = 'testString' + query_aggregation_query_nested_aggregation_model_json['matching_results'] = 26 + query_aggregation_query_nested_aggregation_model_json['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a model instance of QueryAggregationQueryNestedAggregation by calling from_dict on the json representation + query_aggregation_query_nested_aggregation_model = QueryAggregationQueryNestedAggregation.from_dict(query_aggregation_query_nested_aggregation_model_json) + assert query_aggregation_query_nested_aggregation_model != False + + # Construct a model instance of QueryAggregationQueryNestedAggregation by calling from_dict on the json representation + query_aggregation_query_nested_aggregation_model_dict = 
QueryAggregationQueryNestedAggregation.from_dict(query_aggregation_query_nested_aggregation_model_json).__dict__ + query_aggregation_query_nested_aggregation_model2 = QueryAggregationQueryNestedAggregation(**query_aggregation_query_nested_aggregation_model_dict) + + # Verify the model instances are equivalent + assert query_aggregation_query_nested_aggregation_model == query_aggregation_query_nested_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + query_aggregation_query_nested_aggregation_model_json2 = query_aggregation_query_nested_aggregation_model.to_dict() + assert query_aggregation_query_nested_aggregation_model_json2 == query_aggregation_query_nested_aggregation_model_json + + +class TestModel_QueryAggregationQueryPairAggregation: + """ + Test Class for QueryAggregationQueryPairAggregation + """ + + def test_query_aggregation_query_pair_aggregation_serialization(self): + """ + Test serialization/deserialization for QueryAggregationQueryPairAggregation + """ + + # Construct dict forms of any model objects needed in order to build this model. + + query_pair_aggregation_result_model = {} # QueryPairAggregationResult + query_pair_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a json representation of a QueryAggregationQueryPairAggregation model + query_aggregation_query_pair_aggregation_model_json = {} + query_aggregation_query_pair_aggregation_model_json['type'] = 'pair' + query_aggregation_query_pair_aggregation_model_json['first'] = 'testString' + query_aggregation_query_pair_aggregation_model_json['second'] = 'testString' + query_aggregation_query_pair_aggregation_model_json['show_estimated_matching_results'] = False + query_aggregation_query_pair_aggregation_model_json['show_total_matching_documents'] = False + query_aggregation_query_pair_aggregation_model_json['results'] = [query_pair_aggregation_result_model] + + # Construct a model instance of QueryAggregationQueryPairAggregation by calling from_dict on the json representation + query_aggregation_query_pair_aggregation_model = QueryAggregationQueryPairAggregation.from_dict(query_aggregation_query_pair_aggregation_model_json) + assert query_aggregation_query_pair_aggregation_model != False + + # Construct a model instance of QueryAggregationQueryPairAggregation by calling from_dict on the json representation + query_aggregation_query_pair_aggregation_model_dict = QueryAggregationQueryPairAggregation.from_dict(query_aggregation_query_pair_aggregation_model_json).__dict__ + query_aggregation_query_pair_aggregation_model2 = QueryAggregationQueryPairAggregation(**query_aggregation_query_pair_aggregation_model_dict) + + # Verify the model instances are equivalent + assert query_aggregation_query_pair_aggregation_model == query_aggregation_query_pair_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + query_aggregation_query_pair_aggregation_model_json2 = query_aggregation_query_pair_aggregation_model.to_dict() + assert query_aggregation_query_pair_aggregation_model_json2 == query_aggregation_query_pair_aggregation_model_json + + +class TestModel_QueryAggregationQueryTermAggregation: + """ + Test Class for QueryAggregationQueryTermAggregation + """ + + def test_query_aggregation_query_term_aggregation_serialization(self): + """ + Test serialization/deserialization for QueryAggregationQueryTermAggregation + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + query_term_aggregation_result_model = {} # QueryTermAggregationResult + query_term_aggregation_result_model['key'] = 'testString' + query_term_aggregation_result_model['matching_results'] = 38 + query_term_aggregation_result_model['relevancy'] = 72.5 + query_term_aggregation_result_model['total_matching_documents'] = 38 + query_term_aggregation_result_model['estimated_matching_results'] = 72.5 + query_term_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a json representation of a QueryAggregationQueryTermAggregation model + query_aggregation_query_term_aggregation_model_json = {} + query_aggregation_query_term_aggregation_model_json['type'] = 'term' + query_aggregation_query_term_aggregation_model_json['field'] = 'testString' + query_aggregation_query_term_aggregation_model_json['count'] = 38 + query_aggregation_query_term_aggregation_model_json['name'] = 'testString' + query_aggregation_query_term_aggregation_model_json['results'] = [query_term_aggregation_result_model] + + # Construct a model instance of QueryAggregationQueryTermAggregation by calling from_dict on the json representation + query_aggregation_query_term_aggregation_model = QueryAggregationQueryTermAggregation.from_dict(query_aggregation_query_term_aggregation_model_json) + assert query_aggregation_query_term_aggregation_model != False + + # Construct a model instance of QueryAggregationQueryTermAggregation by calling from_dict on the json representation + query_aggregation_query_term_aggregation_model_dict = QueryAggregationQueryTermAggregation.from_dict(query_aggregation_query_term_aggregation_model_json).__dict__ + query_aggregation_query_term_aggregation_model2 = QueryAggregationQueryTermAggregation(**query_aggregation_query_term_aggregation_model_dict) + + # Verify the model instances are equivalent + assert query_aggregation_query_term_aggregation_model == query_aggregation_query_term_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + query_aggregation_query_term_aggregation_model_json2 = query_aggregation_query_term_aggregation_model.to_dict() + assert query_aggregation_query_term_aggregation_model_json2 == query_aggregation_query_term_aggregation_model_json + + +class TestModel_QueryAggregationQueryTimesliceAggregation: + """ + Test Class for QueryAggregationQueryTimesliceAggregation + """ + + def test_query_aggregation_query_timeslice_aggregation_serialization(self): + """ + Test serialization/deserialization for QueryAggregationQueryTimesliceAggregation + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + query_timeslice_aggregation_result_model = {} # QueryTimesliceAggregationResult + query_timeslice_aggregation_result_model['key_as_string'] = 'testString' + query_timeslice_aggregation_result_model['key'] = 26 + query_timeslice_aggregation_result_model['matching_results'] = 26 + query_timeslice_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a json representation of a QueryAggregationQueryTimesliceAggregation model + query_aggregation_query_timeslice_aggregation_model_json = {} + query_aggregation_query_timeslice_aggregation_model_json['type'] = 'timeslice' + query_aggregation_query_timeslice_aggregation_model_json['field'] = 'testString' + query_aggregation_query_timeslice_aggregation_model_json['interval'] = 'testString' + query_aggregation_query_timeslice_aggregation_model_json['name'] = 'testString' + query_aggregation_query_timeslice_aggregation_model_json['results'] = [query_timeslice_aggregation_result_model] + + # Construct a model instance of QueryAggregationQueryTimesliceAggregation by calling from_dict on the json representation + query_aggregation_query_timeslice_aggregation_model = QueryAggregationQueryTimesliceAggregation.from_dict(query_aggregation_query_timeslice_aggregation_model_json) + assert query_aggregation_query_timeslice_aggregation_model != False + + # Construct a model instance of QueryAggregationQueryTimesliceAggregation by calling from_dict on the json representation + query_aggregation_query_timeslice_aggregation_model_dict = QueryAggregationQueryTimesliceAggregation.from_dict(query_aggregation_query_timeslice_aggregation_model_json).__dict__ + query_aggregation_query_timeslice_aggregation_model2 = QueryAggregationQueryTimesliceAggregation(**query_aggregation_query_timeslice_aggregation_model_dict) + + # Verify the model instances are equivalent + assert query_aggregation_query_timeslice_aggregation_model == query_aggregation_query_timeslice_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + query_aggregation_query_timeslice_aggregation_model_json2 = query_aggregation_query_timeslice_aggregation_model.to_dict() + assert query_aggregation_query_timeslice_aggregation_model_json2 == query_aggregation_query_timeslice_aggregation_model_json + + +class TestModel_QueryAggregationQueryTopHitsAggregation: + """ + Test Class for QueryAggregationQueryTopHitsAggregation + """ + + def test_query_aggregation_query_top_hits_aggregation_serialization(self): + """ + Test serialization/deserialization for QueryAggregationQueryTopHitsAggregation + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + query_top_hits_aggregation_result_model = {} # QueryTopHitsAggregationResult + query_top_hits_aggregation_result_model['matching_results'] = 38 + query_top_hits_aggregation_result_model['hits'] = [{'anyKey': 'anyValue'}] + + # Construct a json representation of a QueryAggregationQueryTopHitsAggregation model + query_aggregation_query_top_hits_aggregation_model_json = {} + query_aggregation_query_top_hits_aggregation_model_json['type'] = 'top_hits' + query_aggregation_query_top_hits_aggregation_model_json['size'] = 38 + query_aggregation_query_top_hits_aggregation_model_json['name'] = 'testString' + query_aggregation_query_top_hits_aggregation_model_json['hits'] = query_top_hits_aggregation_result_model + + # Construct a model instance of QueryAggregationQueryTopHitsAggregation by calling from_dict on the json representation + query_aggregation_query_top_hits_aggregation_model = QueryAggregationQueryTopHitsAggregation.from_dict(query_aggregation_query_top_hits_aggregation_model_json) + assert query_aggregation_query_top_hits_aggregation_model != False + + # Construct a model instance of QueryAggregationQueryTopHitsAggregation by calling from_dict on the json representation + query_aggregation_query_top_hits_aggregation_model_dict = QueryAggregationQueryTopHitsAggregation.from_dict(query_aggregation_query_top_hits_aggregation_model_json).__dict__ + query_aggregation_query_top_hits_aggregation_model2 = QueryAggregationQueryTopHitsAggregation(**query_aggregation_query_top_hits_aggregation_model_dict) + + # Verify the model instances are equivalent + assert query_aggregation_query_top_hits_aggregation_model == query_aggregation_query_top_hits_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + query_aggregation_query_top_hits_aggregation_model_json2 = query_aggregation_query_top_hits_aggregation_model.to_dict() + assert query_aggregation_query_top_hits_aggregation_model_json2 == query_aggregation_query_top_hits_aggregation_model_json + + +class TestModel_QueryAggregationQueryTopicAggregation: + """ + Test Class for QueryAggregationQueryTopicAggregation + """ + + def test_query_aggregation_query_topic_aggregation_serialization(self): + """ + Test serialization/deserialization for QueryAggregationQueryTopicAggregation + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + query_topic_aggregation_result_model = {} # QueryTopicAggregationResult + query_topic_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a json representation of a QueryAggregationQueryTopicAggregation model + query_aggregation_query_topic_aggregation_model_json = {} + query_aggregation_query_topic_aggregation_model_json['type'] = 'topic' + query_aggregation_query_topic_aggregation_model_json['facet'] = 'testString' + query_aggregation_query_topic_aggregation_model_json['time_segments'] = 'testString' + query_aggregation_query_topic_aggregation_model_json['show_estimated_matching_results'] = False + query_aggregation_query_topic_aggregation_model_json['show_total_matching_documents'] = False + query_aggregation_query_topic_aggregation_model_json['results'] = [query_topic_aggregation_result_model] + + # Construct a model instance of QueryAggregationQueryTopicAggregation by calling from_dict on the json representation + query_aggregation_query_topic_aggregation_model = QueryAggregationQueryTopicAggregation.from_dict(query_aggregation_query_topic_aggregation_model_json) + assert query_aggregation_query_topic_aggregation_model != False + + # Construct a model instance of QueryAggregationQueryTopicAggregation by calling from_dict on the json representation + query_aggregation_query_topic_aggregation_model_dict = QueryAggregationQueryTopicAggregation.from_dict(query_aggregation_query_topic_aggregation_model_json).__dict__ + query_aggregation_query_topic_aggregation_model2 = QueryAggregationQueryTopicAggregation(**query_aggregation_query_topic_aggregation_model_dict) + + # Verify the model instances are equivalent + assert query_aggregation_query_topic_aggregation_model == query_aggregation_query_topic_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + query_aggregation_query_topic_aggregation_model_json2 = query_aggregation_query_topic_aggregation_model.to_dict() + assert query_aggregation_query_topic_aggregation_model_json2 == query_aggregation_query_topic_aggregation_model_json + + +class TestModel_QueryAggregationQueryTrendAggregation: + """ + Test Class for QueryAggregationQueryTrendAggregation + """ + + def test_query_aggregation_query_trend_aggregation_serialization(self): + """ + Test serialization/deserialization for QueryAggregationQueryTrendAggregation + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + query_trend_aggregation_result_model = {} # QueryTrendAggregationResult + query_trend_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}] + + # Construct a json representation of a QueryAggregationQueryTrendAggregation model + query_aggregation_query_trend_aggregation_model_json = {} + query_aggregation_query_trend_aggregation_model_json['type'] = 'trend' + query_aggregation_query_trend_aggregation_model_json['facet'] = 'testString' + query_aggregation_query_trend_aggregation_model_json['time_segments'] = 'testString' + query_aggregation_query_trend_aggregation_model_json['show_estimated_matching_results'] = False + query_aggregation_query_trend_aggregation_model_json['show_total_matching_documents'] = False + query_aggregation_query_trend_aggregation_model_json['results'] = [query_trend_aggregation_result_model] + + # Construct a model instance of QueryAggregationQueryTrendAggregation by calling from_dict on the json representation + query_aggregation_query_trend_aggregation_model = QueryAggregationQueryTrendAggregation.from_dict(query_aggregation_query_trend_aggregation_model_json) + assert query_aggregation_query_trend_aggregation_model != False + + # Construct a model instance of QueryAggregationQueryTrendAggregation by calling from_dict on the json representation + query_aggregation_query_trend_aggregation_model_dict = QueryAggregationQueryTrendAggregation.from_dict(query_aggregation_query_trend_aggregation_model_json).__dict__ + query_aggregation_query_trend_aggregation_model2 = QueryAggregationQueryTrendAggregation(**query_aggregation_query_trend_aggregation_model_dict) + + # Verify the model instances are equivalent + assert query_aggregation_query_trend_aggregation_model == query_aggregation_query_trend_aggregation_model2 + + # Convert model instance back to dict and verify no loss of data + query_aggregation_query_trend_aggregation_model_json2 = query_aggregation_query_trend_aggregation_model.to_dict() + assert query_aggregation_query_trend_aggregation_model_json2 == query_aggregation_query_trend_aggregation_model_json + + +# endregion +############################################################################## +# End of Model Tests +############################################################################## diff --git a/test/unit/test_language_translator_v3.py b/test/unit/test_language_translator_v3.py deleted file mode 100644 index 4244d950e..000000000 --- a/test/unit/test_language_translator_v3.py +++ /dev/null @@ -1,306 +0,0 @@ -# coding=utf-8 - -import json -import os -import responses -import ibm_watson -from ibm_watson.language_translator_v3 import TranslationResult, TranslationModels, TranslationModel, IdentifiedLanguages, IdentifiableLanguages, DeleteModelResult - -platform_url = 'https://gateway.watsonplatform.net' -service_path = '/language-translator/api' -base_url = '{0}{1}'.format(platform_url, service_path) - -iam_url = "https://iam.cloud.ibm.com/identity/token" -iam_token_response = """{ - "access_token": "oAeisG8yqPY7sFR_x66Z15", - "token_type": "Bearer", - "expires_in": 3600, - "expiration": 1524167011, - "refresh_token": "jy4gl91BQ" -}""" - -######################### -# counterexamples -######################### - -@responses.activate -def test_translate_source_target(): - service = ibm_watson.LanguageTranslatorV3( - version='2018-05-01', - iam_apikey='iam_apikey') - endpoint = '/v3/translate' - url = '{0}{1}'.format(base_url, endpoint) - expected = { - "character_count": 19, - "translations": [{"translation": u"Hello, how are you ? 
\u20ac"}], - "word_count": 4 - } - responses.add( - responses.POST, - url, - body=json.dumps(expected), - status=200, - content_type='application/json') - responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200) - - response = service.translate('Hola, cómo estás? €', source='es', target='en').get_result() - assert len(responses.calls) == 2 - assert responses.calls[1].request.url.startswith(url) - assert response == expected - TranslationResult._from_dict(response) - -@responses.activate -def test_translate_model_id(): - service = ibm_watson.LanguageTranslatorV3( - version='2018-05-01', - iam_apikey='iam_apikey') - endpoint = '/v3/translate' - url = '{0}{1}'.format(base_url, endpoint) - expected = { - "character_count": 22, - "translations": [ - { - "translation": "Messi es el mejor" - } - ], - "word_count": 5 - } - responses.add( - responses.POST, - url, - body=json.dumps(expected), - status=200, - content_type='application/json') - responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200) - response = service.translate('Messi is the best ever', - model_id='en-es-conversational').get_result() - - assert len(responses.calls) == 2 - assert responses.calls[1].request.url.startswith(url) - assert response == expected - TranslationResult._from_dict(response) - -@responses.activate -def test_identify(): - service = ibm_watson.LanguageTranslatorV3( - version='2018-05-01', - iam_apikey='iam_apikey') - endpoint = '/v3/identify' - url = '{0}{1}'.format(base_url, endpoint) - expected = { - "languages": [ - { - "confidence": 0.477673, - "language": "zh" - }, - { - "confidence": 0.262053, - "language": "zh-TW" - }, - { - "confidence": 0.00958378, - "language": "en" - } - ] - } - responses.add( - responses.POST, - url, - body=json.dumps(expected), - status=200, - content_type='application/json') - responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200) - response = service.identify('祝你有美好的一天').get_result() - assert len(responses.calls) == 2 - assert responses.calls[1].request.url.startswith(url) - assert response == expected - IdentifiedLanguages._from_dict(response) - -@responses.activate -def test_list_identifiable_languages(): - service = ibm_watson.LanguageTranslatorV3( - version='2018-05-01', - iam_apikey='iam_apikey') - endpoint = '/v3/identifiable_languages' - url = '{0}{1}'.format(base_url, endpoint) - expected = { - "languages": [ - { - "name": "German", - "language": "de" - }, - { - "name": "Greek", - "language": "el" - }, - { - "name": "English", - "language": "en" - }, - { - "name": "Esperanto", - "language": "eo" - }, - { - "name": "Spanish", - "language": "es" - }, - { - "name": "Chinese", - "language": "zh" - } - ] - } - responses.add( - responses.GET, - url, - body=json.dumps(expected), - status=200, - content_type='application/json') - responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200) - response = service.list_identifiable_languages().get_result() - assert len(responses.calls) == 2 - assert responses.calls[1].request.url.startswith(url) - assert response == expected - IdentifiableLanguages._from_dict(response) - -@responses.activate -def test_create_model(): - service = ibm_watson.LanguageTranslatorV3( - version='2018-05-01', - username='xxx', - password='yyy' - ) - endpoint = '/v3/models' - url = '{0}{1}'.format(base_url, endpoint) - expected = { - "status": "available", - "model_id": "en-es-conversational", - "domain": "conversational", - "target": "es", - "customizable": False, - 
"source": "en", - "base_model_id": "en-es-conversational", - "owner": "", - "default_model": False, - "name": "test_glossary" - } - responses.add( - responses.POST, - url, - body=json.dumps(expected), - status=200, - content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../../resources/language_translator_model.tmx'), 'rb') as custom_model: - response = service.create_model('en-fr', - name='test_glossary', - forced_glossary=custom_model).get_result() - assert len(responses.calls) == 1 - assert responses.calls[0].request.url.startswith(url) - assert response == expected - TranslationModel._from_dict(response) - -@responses.activate -def test_delete_model(): - service = ibm_watson.LanguageTranslatorV3( - version='2018-05-01', - iam_apikey='iam_apikey') - model_id = 'en-es-conversational' - endpoint = '/v3/models/' + model_id - url = '{0}{1}'.format(base_url, endpoint) - expected = { - "status": "OK", - } - responses.add( - responses.DELETE, - url, - body=json.dumps(expected), - status=200, - content_type='application/json') - responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200) - response = service.delete_model(model_id).get_result() - assert len(responses.calls) == 2 - assert responses.calls[1].request.url.startswith(url) - assert response == expected - DeleteModelResult._from_dict(response) - -@responses.activate -def test_get_model(): - service = ibm_watson.LanguageTranslatorV3( - version='2018-05-01', - iam_apikey='iam_apikey') - model_id = 'en-es-conversational' - endpoint = '/v3/models/' + model_id - url = '{0}{1}'.format(base_url, endpoint) - expected = { - "status": "available", - "model_id": "en-es-conversational", - "domain": "conversational", - "target": "es", - "customizable": False, - "source": "en", - "base_model_id": "", - "owner": "", - "default_model": False, - "name": "en-es-conversational" - } - responses.add( - responses.GET, - url, - body=json.dumps(expected), - status=200, - content_type='application/json') - responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200) - response = service.get_model(model_id).get_result() - assert len(responses.calls) == 2 - assert responses.calls[1].request.url.startswith(url) - assert response == expected - TranslationModel._from_dict(response) - -@responses.activate -def test_list_models(): - service = ibm_watson.LanguageTranslatorV3( - version='2018-05-01', - iam_apikey='iam_apikey') - endpoint = '/v3/models' - url = '{0}{1}'.format(base_url, endpoint) - expected = { - "models": [ - { - "status": "available", - "model_id": "en-es-conversational", - "domain": "conversational", - "target": "es", - "customizable": False, - "source": "en", - "base_model_id": "", - "owner": "", - "default_model": False, - "name": "en-es-conversational" - }, - { - "status": "available", - "model_id": "es-en", - "domain": "news", - "target": "en", - "customizable": True, - "source": "es", - "base_model_id": "", - "owner": "", - "default_model": True, - "name": "es-en" - } - ] - } - responses.add( - responses.GET, - url, - body=json.dumps(expected), - status=200, - content_type='application/json') - responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200) - response = service.list_models().get_result() - assert len(responses.calls) == 2 - assert responses.calls[1].request.url.startswith(url) - assert response == expected - TranslationModels._from_dict(response) diff --git a/test/unit/test_natural_language_classifier_v1.py 
b/test/unit/test_natural_language_classifier_v1.py deleted file mode 100644 index 8146d9c4d..000000000 --- a/test/unit/test_natural_language_classifier_v1.py +++ /dev/null @@ -1,134 +0,0 @@ -# coding: utf-8 -import os -import responses -import ibm_watson - - -@responses.activate -def test_success(): - natural_language_classifier = ibm_watson.NaturalLanguageClassifierV1(username="username", - password="password") - - list_url = 'https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers' - list_response = '{"classifiers": [{"url": "https://gateway.watsonplatform.net/natural-language-classifier-' \ - 'experimental/api/v1/classifiers/497EF2-nlc-00", "classifier_id": "497EF2-nlc-00"}]}' - responses.add(responses.GET, list_url, - body=list_response, status=200, - content_type='application/json') - - natural_language_classifier.list_classifiers() - - assert responses.calls[0].request.url == list_url - assert responses.calls[0].response.text == list_response - - status_url = ('https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers/' - '497EF2-nlc-00') - status_response = '{"url": "https://gateway.watsonplatform.net/natural-language-classifier/api/v1/' \ - 'classifiers/497EF2-nlc-00", "status": "Available", "status_description": "The classifier ' \ - 'instance is now available and is ready to take classifier requests.", "classifier_id": ' \ - '"497EF2-nlc-00"}' - - responses.add(responses.GET, status_url, - body=status_response, status=200, - content_type='application/json') - - natural_language_classifier.get_classifier('497EF2-nlc-00') - - assert responses.calls[1].request.url == status_url - assert responses.calls[1].response.text == status_response - - classify_url = 'https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers/' \ - '497EF2-nlc-00/classify' - classify_response = '{"url": "https://gateway.watsonplatform.net/natural-language-classifier/api/' \ - 'v1", "text": "test", "classes": [{"class_name": "conditions", "confidence": ' \ - '0.6575315710901418}, {"class_name": "temperature", "confidence": 0.3424684289098582}], ' \ - '"classifier_id": "497EF2-nlc-00", "top_class": "conditions"}' - - responses.add(responses.POST, classify_url, - body=classify_response, status=200, - content_type='application/json') - - natural_language_classifier.classify('497EF2-nlc-00', 'test') - - assert responses.calls[2].request.url == classify_url - assert responses.calls[2].response.text == classify_response - - create_url = 'https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers' - create_response = '{"url": "https://gateway.watsonplatform.net/natural-language-classifier/api/v1/' \ - 'classifiers/497EF2-nlc-00", "status": "Available", "status_description": "The classifier ' \ - 'instance is now available and is ready to take classifier requests.", "classifier_id": ' \ - '"497EF2-nlc-00"}' - - responses.add(responses.POST, create_url, - body=create_response, status=200, - content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../../resources/weather_data_train.csv'), 'rb') as training_data: - natural_language_classifier.create_classifier( - training_data=training_data, metadata='{"language": "en"}') - - assert responses.calls[3].request.url == create_url - assert responses.calls[3].response.text == create_response - - remove_url = status_url - remove_response = '{}' - - responses.add(responses.DELETE, remove_url, - body=remove_response, status=200, - 
content_type='application/json') - - natural_language_classifier.delete_classifier('497EF2-nlc-00') - - assert responses.calls[4].request.url == remove_url - assert responses.calls[4].response.text == remove_response - - assert len(responses.calls) == 5 - -@responses.activate -def test_classify_collection(): - natural_language_classifier = ibm_watson.NaturalLanguageClassifierV1(username="username", - password="password") - classify_collection_url = 'https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers/497EF2-nlc-00/classify_collection' - classify_collection_response = '{ \ - "classifier_id": "497EF2-nlc-00", \ - "url": "https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers/10D41B-nlc-1", \ - "collection": [ \ - { \ - "text": "How hot will it be today?", \ - "top_class": "temperature", \ - "classes": [ \ - { \ - "class_name": "temperature", \ - "confidence": 0.9930558798985937 \ - }, \ - { \ - "class_name": "conditions", \ - "confidence": 0.006944120101406304 \ - } \ - ] \ - }, \ - { \ - "text": "Is it hot outside?", \ - "top_class": "temperature", \ - "classes": [ \ - { \ - "class_name": "temperature", \ - "confidence": 1 \ - }, \ - { \ - "class_name": "conditions", \ - "confidence": 0 \ - } \ - ] \ - } \ - ] \ - }' - responses.add(responses.POST, classify_collection_url, - body=classify_collection_response, status=200, - content_type='application/json') - - classifier_id = '497EF2-nlc-00' - collection = ['{"text":"How hot will it be today?"}', '{"text":"Is it hot outside?"}'] - natural_language_classifier.classify_collection(classifier_id, collection) - - assert responses.calls[0].request.url == classify_collection_url - assert responses.calls[0].response.text == classify_collection_response diff --git a/test/unit/test_natural_language_understanding.py b/test/unit/test_natural_language_understanding.py deleted file mode 100644 index 4a18ad0a4..000000000 --- a/test/unit/test_natural_language_understanding.py +++ /dev/null @@ -1,153 +0,0 @@ -# coding: utf-8 -from unittest import TestCase -from ibm_watson import NaturalLanguageUnderstandingV1 -from ibm_watson.natural_language_understanding_v1 import \ - Features, ConceptsOptions, EntitiesOptions, KeywordsOptions, CategoriesOptions, \ - EmotionOptions, MetadataOptions, SemanticRolesOptions, RelationsOptions, \ - SentimentOptions - -import os -import pytest -import responses - - -base_url = 'https://gateway.watsonplatform.net' -default_url = '{0}/natural-language-understanding/api'.format(base_url) - - -class TestFeatures(TestCase): - def test_concepts(self): - c = Features(concepts=ConceptsOptions()) - assert c._to_dict() == {'concepts': {}} - c = Features(concepts=ConceptsOptions(limit=10)) - assert c._to_dict() == {'concepts': {'limit': 10}} - - def test_entities(self): - e = Features(entities=EntitiesOptions()) - assert e._to_dict() == {'entities': {}} - - def test_keywords(self): - k = Features(keywords=KeywordsOptions()) - assert k._to_dict() == {'keywords': {}} - - def test_categories(self): - c = Features(categories=CategoriesOptions()) - assert c._to_dict() == {'categories': {}} - - def test_emotion(self): - e = Features(emotion=EmotionOptions()) - assert e._to_dict() == {'emotion': {}} - - def test_metadata(self): - m = Features(metadata=MetadataOptions()) - assert m._to_dict() == {'metadata': {}} - - def test_semantic_roles(self): - s = Features(semantic_roles=SemanticRolesOptions()) - assert s._to_dict() == {'semantic_roles': {}} - - def test_relations(self): - r = 
Features(relations=RelationsOptions()) - assert r._to_dict() == {'relations': {}} - - def test_sentiment(self): - s = Features(sentiment=SentimentOptions()) - assert s._to_dict() == {'sentiment': {}} - - -class TestNaturalLanguageUnderstanding(TestCase): - def test_version_date(self): - with pytest.raises(TypeError): - NaturalLanguageUnderstandingV1() # pylint: disable=E1120 - nlu = NaturalLanguageUnderstandingV1(version='2016-01-23', - url='http://bogus.com', - username='username', - password='password') - assert nlu - - @pytest.mark.skipif(os.getenv('VCAP_SERVICES') is not None, - reason='credentials may come from VCAP_SERVICES') - def test_missing_credentials(self): - with pytest.raises(ValueError): - NaturalLanguageUnderstandingV1(version='2016-01-23') - with pytest.raises(ValueError): - NaturalLanguageUnderstandingV1(version='2016-01-23', - url='https://bogus.com') - - def test_analyze_throws(self): - nlu = NaturalLanguageUnderstandingV1(version='2016-01-23', - url='http://bogus.com', - username='username', - password='password') - with pytest.raises(ValueError): - nlu.analyze(None, text="this will not work") - - @responses.activate - def test_text_analyze(self): - nlu_url = "http://bogus.com/v1/analyze" - responses.add(responses.POST, nlu_url, - body="{\"resulting_key\": true}", status=200, - content_type='application/json') - nlu = NaturalLanguageUnderstandingV1(version='2016-01-23', - url='http://bogus.com', - username='username', - password='password') - nlu.analyze(Features(sentiment=SentimentOptions()), text="hello this is a test") - assert len(responses.calls) == 1 - - @responses.activate - def test_html_analyze(self): - nlu_url = "http://bogus.com/v1/analyze" - responses.add(responses.POST, nlu_url, - body="{\"resulting_key\": true}", status=200, - content_type='application/json') - nlu = NaturalLanguageUnderstandingV1(version='2016-01-23', - url='http://bogus.com', - username='username', - password='password') - nlu.analyze(Features(sentiment=SentimentOptions(), - emotion=EmotionOptions(document=False)), - html="hello this is a test") - assert len(responses.calls) == 1 - - @responses.activate - def test_url_analyze(self): - nlu_url = "http://bogus.com/v1/analyze" - responses.add(responses.POST, nlu_url, - body="{\"resulting_key\": true}", status=200, - content_type='application/json') - nlu = NaturalLanguageUnderstandingV1(version='2016-01-23', - url='http://bogus.com', - username='username', - password='password') - nlu.analyze(Features(sentiment=SentimentOptions(), - emotion=EmotionOptions(document=False)), - url="http://cnn.com", - xpath="/bogus/xpath", language="en") - assert len(responses.calls) == 1 - - @responses.activate - def test_list_models(self): - nlu_url = "http://bogus.com/v1/models" - responses.add(responses.GET, nlu_url, status=200, - body="{\"resulting_key\": true}", - content_type='application/json') - nlu = NaturalLanguageUnderstandingV1(version='2016-01-23', - url='http://bogus.com', - username='username', - password='password') - nlu.list_models() - assert len(responses.calls) == 1 - - @responses.activate - def test_delete_model(self): - model_id = "invalid_model_id" - nlu_url = "http://bogus.com/v1/models/" + model_id - responses.add(responses.DELETE, nlu_url, status=200, - body="{}", content_type='application/json') - nlu = NaturalLanguageUnderstandingV1(version='2016-01-23', - url='http://bogus.com', - username='username', - password='password') - nlu.delete_model(model_id) - assert len(responses.calls) == 1 diff --git 
a/test/unit/test_natural_language_understanding_v1.py b/test/unit/test_natural_language_understanding_v1.py new file mode 100644 index 000000000..40ca030a6 --- /dev/null +++ b/test/unit/test_natural_language_understanding_v1.py @@ -0,0 +1,3839 @@ +# -*- coding: utf-8 -*- +# (C) Copyright IBM Corp. 2019, 2024. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Unit Tests for NaturalLanguageUnderstandingV1 +""" + +from datetime import datetime, timezone +from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator +from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime +import inspect +import io +import json +import pytest +import re +import requests +import responses +import tempfile +import urllib +from ibm_watson.natural_language_understanding_v1 import * + +version = 'testString' + +_service = NaturalLanguageUnderstandingV1( + authenticator=NoAuthAuthenticator(), + version=version, +) + +_base_url = 'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com' +_service.set_service_url(_base_url) + + +def preprocess_url(operation_path: str): + """ + Returns the request url associated with the specified operation path. + This will be base_url concatenated with a quoted version of operation_path. + The returned request URL is used to register the mock response so it needs + to match the request URL that is formed by the requests library. + """ + + # Form the request URL from the base URL and operation path. + request_url = _base_url + operation_path + + # If the request url does NOT end with a /, then just return it as-is. + # Otherwise, return a regular expression that matches one or more trailing /. 
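# The trailing-slash handling described just above returns the literal URL when
# the operation path has no trailing slash, and otherwise a compiled regex that
# tolerates repeated trailing slashes (the helper's body continues below). A
# minimal, self-contained sketch of that behaviour; the base URL here is
# illustrative, not the real service endpoint.
import re

_example_base_url = 'https://api.example.test'

def _preprocess_url_sketch(operation_path: str):
    request_url = _example_base_url + operation_path
    if not request_url.endswith('/'):
        return request_url  # exact string match for the mock registration
    return re.compile(request_url.rstrip('/') + '/+')  # tolerate extra slashes

assert _preprocess_url_sketch('/v1/analyze') == 'https://api.example.test/v1/analyze'
assert _preprocess_url_sketch('/v1/analyze/').match('https://api.example.test/v1/analyze///')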
+ if not request_url.endswith('/'): + return request_url + return re.compile(request_url.rstrip('/') + '/+') + + +############################################################################## +# Start of Service: Analyze +############################################################################## +# region + + +class TestAnalyze: + """ + Test Class for analyze + """ + + @responses.activate + def test_analyze_all_params(self): + """ + analyze() + """ + # Set up mock + url = preprocess_url('/v1/analyze') + mock_response = '{"language": "language", "analyzed_text": "analyzed_text", "retrieved_url": "retrieved_url", "usage": {"features": 8, "text_characters": 15, "text_units": 10}, "concepts": [{"text": "text", "relevance": 9, "dbpedia_resource": "dbpedia_resource"}], "entities": [{"type": "type", "text": "text", "relevance": 9, "confidence": 10, "mentions": [{"text": "text", "location": [8], "confidence": 10}], "count": 5, "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}, "sentiment": {"score": 5}, "disambiguation": {"name": "name", "dbpedia_resource": "dbpedia_resource", "subtype": ["subtype"]}}], "keywords": [{"count": 5, "relevance": 9, "text": "text", "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}, "sentiment": {"score": 5}}], "categories": [{"label": "label", "score": 5, "explanation": {"relevant_text": [{"text": "text"}]}}], "classifications": [{"class_name": "class_name", "confidence": 10}], "emotion": {"document": {"emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}}, "targets": [{"text": "text", "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}}]}, "metadata": {"authors": [{"name": "name"}], "publication_date": "publication_date", "title": "title", "image": "image", "feeds": [{"link": "link"}]}, "relations": [{"score": 5, "sentence": "sentence", "type": "type", "arguments": [{"entities": [{"text": "text", "type": "type"}], "location": [8], "text": "text"}]}], "semantic_roles": [{"sentence": "sentence", "subject": {"text": "text", "entities": [{"type": "type", "text": "text"}], "keywords": [{"text": "text"}]}, "action": {"text": "text", "normalized": "normalized", "verb": {"text": "text", "tense": "tense"}}, "object": {"text": "text", "keywords": [{"text": "text"}]}}], "sentiment": {"document": {"label": "label", "score": 5}, "targets": [{"text": "text", "score": 5}]}, "syntax": {"tokens": [{"text": "text", "part_of_speech": "ADJ", "location": [8], "lemma": "lemma"}], "sentences": [{"text": "text", "location": [8]}]}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a ClassificationsOptions model + classifications_options_model = {} + classifications_options_model['model'] = 'testString' + + # Construct a dict representation of a ConceptsOptions model + concepts_options_model = {} + concepts_options_model['limit'] = 8 + + # Construct a dict representation of a EmotionOptions model + emotion_options_model = {} + emotion_options_model['document'] = True + emotion_options_model['targets'] = ['testString'] + + # Construct a dict representation of a EntitiesOptions model + entities_options_model = {} + entities_options_model['limit'] = 50 + entities_options_model['mentions'] = False + entities_options_model['model'] = 'testString' + entities_options_model['sentiment'] = False + entities_options_model['emotion'] = False + + # Construct a dict representation of a KeywordsOptions model + 
keywords_options_model = {} + keywords_options_model['limit'] = 50 + keywords_options_model['sentiment'] = False + keywords_options_model['emotion'] = False + + # Construct a dict representation of a RelationsOptions model + relations_options_model = {} + relations_options_model['model'] = 'testString' + + # Construct a dict representation of a SemanticRolesOptions model + semantic_roles_options_model = {} + semantic_roles_options_model['limit'] = 50 + semantic_roles_options_model['keywords'] = False + semantic_roles_options_model['entities'] = False + + # Construct a dict representation of a SentimentOptions model + sentiment_options_model = {} + sentiment_options_model['document'] = True + sentiment_options_model['targets'] = ['testString'] + + # Construct a dict representation of a CategoriesOptions model + categories_options_model = {} + categories_options_model['explanation'] = False + categories_options_model['limit'] = 3 + categories_options_model['model'] = 'testString' + + # Construct a dict representation of a SyntaxOptionsTokens model + syntax_options_tokens_model = {} + syntax_options_tokens_model['lemma'] = True + syntax_options_tokens_model['part_of_speech'] = True + + # Construct a dict representation of a SyntaxOptions model + syntax_options_model = {} + syntax_options_model['tokens'] = syntax_options_tokens_model + syntax_options_model['sentences'] = True + + # Construct a dict representation of a Features model + features_model = {} + features_model['classifications'] = classifications_options_model + features_model['concepts'] = concepts_options_model + features_model['emotion'] = emotion_options_model + features_model['entities'] = entities_options_model + features_model['keywords'] = keywords_options_model + features_model['metadata'] = {'anyKey': 'anyValue'} + features_model['relations'] = relations_options_model + features_model['semantic_roles'] = semantic_roles_options_model + features_model['sentiment'] = sentiment_options_model + features_model['categories'] = categories_options_model + features_model['syntax'] = syntax_options_model + + # Set up parameter values + features = features_model + text = 'testString' + html = 'testString' + url = 'testString' + clean = True + xpath = 'testString' + fallback_to_raw = True + return_analyzed_text = False + language = 'testString' + limit_text_characters = 38 + + # Invoke method + response = _service.analyze( + features, + text=text, + html=html, + url=url, + clean=clean, + xpath=xpath, + fallback_to_raw=fallback_to_raw, + return_analyzed_text=return_analyzed_text, + language=language, + limit_text_characters=limit_text_characters, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['features'] == features_model + assert req_body['text'] == 'testString' + assert req_body['html'] == 'testString' + assert req_body['url'] == 'testString' + assert req_body['clean'] == True + assert req_body['xpath'] == 'testString' + assert req_body['fallback_to_raw'] == True + assert req_body['return_analyzed_text'] == False + assert req_body['language'] == 'testString' + assert req_body['limit_text_characters'] == 38 + + def test_analyze_all_params_with_retries(self): + # Enable retries and run test_analyze_all_params. + _service.enable_retries() + self.test_analyze_all_params() + + # Disable retries and run test_analyze_all_params. 
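# The "Validate body params" step above decodes the request that the mocked
# endpoint recorded and compares it field by field. A self-contained sketch of
# that mock-and-verify pattern, using plain `requests` against an illustrative
# URL rather than the SDK client:
import json
import requests
import responses

@responses.activate
def _sketch_validate_body_params():
    url = 'https://api.example.test/v1/analyze'
    responses.add(responses.POST, url, body='{"language": "en"}',
                  content_type='application/json', status=200)

    requests.post(url, json={'text': 'testString', 'clean': True})

    # `responses` records each intercepted call, so the outgoing body can be
    # decoded and checked exactly as in the assertions above.
    assert len(responses.calls) == 1
    req_body = json.loads(responses.calls[0].request.body)
    assert req_body['text'] == 'testString'
    assert req_body['clean'] == True

_sketch_validate_body_params()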
+ _service.disable_retries() + self.test_analyze_all_params() + + @responses.activate + def test_analyze_value_error(self): + """ + test_analyze_value_error() + """ + # Set up mock + url = preprocess_url('/v1/analyze') + mock_response = '{"language": "language", "analyzed_text": "analyzed_text", "retrieved_url": "retrieved_url", "usage": {"features": 8, "text_characters": 15, "text_units": 10}, "concepts": [{"text": "text", "relevance": 9, "dbpedia_resource": "dbpedia_resource"}], "entities": [{"type": "type", "text": "text", "relevance": 9, "confidence": 10, "mentions": [{"text": "text", "location": [8], "confidence": 10}], "count": 5, "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}, "sentiment": {"score": 5}, "disambiguation": {"name": "name", "dbpedia_resource": "dbpedia_resource", "subtype": ["subtype"]}}], "keywords": [{"count": 5, "relevance": 9, "text": "text", "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}, "sentiment": {"score": 5}}], "categories": [{"label": "label", "score": 5, "explanation": {"relevant_text": [{"text": "text"}]}}], "classifications": [{"class_name": "class_name", "confidence": 10}], "emotion": {"document": {"emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}}, "targets": [{"text": "text", "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}}]}, "metadata": {"authors": [{"name": "name"}], "publication_date": "publication_date", "title": "title", "image": "image", "feeds": [{"link": "link"}]}, "relations": [{"score": 5, "sentence": "sentence", "type": "type", "arguments": [{"entities": [{"text": "text", "type": "type"}], "location": [8], "text": "text"}]}], "semantic_roles": [{"sentence": "sentence", "subject": {"text": "text", "entities": [{"type": "type", "text": "text"}], "keywords": [{"text": "text"}]}, "action": {"text": "text", "normalized": "normalized", "verb": {"text": "text", "tense": "tense"}}, "object": {"text": "text", "keywords": [{"text": "text"}]}}], "sentiment": {"document": {"label": "label", "score": 5}, "targets": [{"text": "text", "score": 5}]}, "syntax": {"tokens": [{"text": "text", "part_of_speech": "ADJ", "location": [8], "lemma": "lemma"}], "sentences": [{"text": "text", "location": [8]}]}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a ClassificationsOptions model + classifications_options_model = {} + classifications_options_model['model'] = 'testString' + + # Construct a dict representation of a ConceptsOptions model + concepts_options_model = {} + concepts_options_model['limit'] = 8 + + # Construct a dict representation of a EmotionOptions model + emotion_options_model = {} + emotion_options_model['document'] = True + emotion_options_model['targets'] = ['testString'] + + # Construct a dict representation of a EntitiesOptions model + entities_options_model = {} + entities_options_model['limit'] = 50 + entities_options_model['mentions'] = False + entities_options_model['model'] = 'testString' + entities_options_model['sentiment'] = False + entities_options_model['emotion'] = False + + # Construct a dict representation of a KeywordsOptions model + keywords_options_model = {} + keywords_options_model['limit'] = 50 + keywords_options_model['sentiment'] = False + keywords_options_model['emotion'] = False + + # Construct a dict representation of a RelationsOptions model + relations_options_model = {} + relations_options_model['model'] = 
'testString' + + # Construct a dict representation of a SemanticRolesOptions model + semantic_roles_options_model = {} + semantic_roles_options_model['limit'] = 50 + semantic_roles_options_model['keywords'] = False + semantic_roles_options_model['entities'] = False + + # Construct a dict representation of a SentimentOptions model + sentiment_options_model = {} + sentiment_options_model['document'] = True + sentiment_options_model['targets'] = ['testString'] + + # Construct a dict representation of a CategoriesOptions model + categories_options_model = {} + categories_options_model['explanation'] = False + categories_options_model['limit'] = 3 + categories_options_model['model'] = 'testString' + + # Construct a dict representation of a SyntaxOptionsTokens model + syntax_options_tokens_model = {} + syntax_options_tokens_model['lemma'] = True + syntax_options_tokens_model['part_of_speech'] = True + + # Construct a dict representation of a SyntaxOptions model + syntax_options_model = {} + syntax_options_model['tokens'] = syntax_options_tokens_model + syntax_options_model['sentences'] = True + + # Construct a dict representation of a Features model + features_model = {} + features_model['classifications'] = classifications_options_model + features_model['concepts'] = concepts_options_model + features_model['emotion'] = emotion_options_model + features_model['entities'] = entities_options_model + features_model['keywords'] = keywords_options_model + features_model['metadata'] = {'anyKey': 'anyValue'} + features_model['relations'] = relations_options_model + features_model['semantic_roles'] = semantic_roles_options_model + features_model['sentiment'] = sentiment_options_model + features_model['categories'] = categories_options_model + features_model['syntax'] = syntax_options_model + + # Set up parameter values + features = features_model + text = 'testString' + html = 'testString' + url = 'testString' + clean = True + xpath = 'testString' + fallback_to_raw = True + return_analyzed_text = False + language = 'testString' + limit_text_characters = 38 + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "features": features, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.analyze(**req_copy) + + def test_analyze_value_error_with_retries(self): + # Enable retries and run test_analyze_value_error. + _service.enable_retries() + self.test_analyze_value_error() + + # Disable retries and run test_analyze_value_error. 
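# The "pass in all but one required param" loop above nulls out each required
# argument in turn and expects a ValueError. A stand-alone sketch of the same
# pattern with a stub in place of the SDK client (all names here are
# illustrative):
import pytest

def _analyze_stub(features=None, text=None):
    if features is None:
        raise ValueError('features must be provided')
    return {'features': features, 'text': text}

def _sketch_required_param_check():
    req_param_dict = {'features': {'sentiment': {}}}
    for param in req_param_dict.keys():
        # Copy the arguments, replacing exactly one required value with None.
        req_copy = {key: val if key is not param else None
                    for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            _analyze_stub(**req_copy)

_sketch_required_param_check()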
+ _service.disable_retries() + self.test_analyze_value_error() + + +# endregion +############################################################################## +# End of Service: Analyze +############################################################################## + +############################################################################## +# Start of Service: ManageModels +############################################################################## +# region + + +class TestListModels: + """ + Test Class for list_models + """ + + @responses.activate + def test_list_models_all_params(self): + """ + list_models() + """ + # Set up mock + url = preprocess_url('/v1/models') + mock_response = '{"models": [{"status": "starting", "model_id": "model_id", "language": "language", "description": "description", "workspace_id": "workspace_id", "model_version": "model_version", "version": "version", "version_description": "version_description", "created": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_models() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_models_all_params_with_retries(self): + # Enable retries and run test_list_models_all_params. + _service.enable_retries() + self.test_list_models_all_params() + + # Disable retries and run test_list_models_all_params. + _service.disable_retries() + self.test_list_models_all_params() + + @responses.activate + def test_list_models_value_error(self): + """ + test_list_models_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models') + mock_response = '{"models": [{"status": "starting", "model_id": "model_id", "language": "language", "description": "description", "workspace_id": "workspace_id", "model_version": "model_version", "version": "version", "version_description": "version_description", "created": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_models(**req_copy) + + def test_list_models_value_error_with_retries(self): + # Enable retries and run test_list_models_value_error. + _service.enable_retries() + self.test_list_models_value_error() + + # Disable retries and run test_list_models_value_error. + _service.disable_retries() + self.test_list_models_value_error() + + +class TestDeleteModel: + """ + Test Class for delete_model + """ + + @responses.activate + def test_delete_model_all_params(self): + """ + delete_model() + """ + # Set up mock + url = preprocess_url('/v1/models/testString') + mock_response = '{"deleted": "deleted"}' + responses.add( + responses.DELETE, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + + # Invoke method + response = _service.delete_model( + model_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_model_all_params_with_retries(self): + # Enable retries and run test_delete_model_all_params. 
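# Each "*_with_retries" helper reruns its companion test twice: once with the
# client's automatic retries enabled and once with them disabled, so both code
# paths hit the same mocked endpoints. Schematically (with a stub standing in
# for the service object):
class _FakeService:
    def enable_retries(self):
        pass

    def disable_retries(self):
        pass

def _run_with_and_without_retries(service, test_fn):
    service.enable_retries()   # first pass: automatic retries on
    test_fn()
    service.disable_retries()  # second pass: retries off
    test_fn()

_run_with_and_without_retries(_FakeService(), lambda: None)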
+ _service.enable_retries() + self.test_delete_model_all_params() + + # Disable retries and run test_delete_model_all_params. + _service.disable_retries() + self.test_delete_model_all_params() + + @responses.activate + def test_delete_model_value_error(self): + """ + test_delete_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/testString') + mock_response = '{"deleted": "deleted"}' + responses.add( + responses.DELETE, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "model_id": model_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_model(**req_copy) + + def test_delete_model_value_error_with_retries(self): + # Enable retries and run test_delete_model_value_error. + _service.enable_retries() + self.test_delete_model_value_error() + + # Disable retries and run test_delete_model_value_error. + _service.disable_retries() + self.test_delete_model_value_error() + + +# endregion +############################################################################## +# End of Service: ManageModels +############################################################################## + +############################################################################## +# Start of Service: ManageCategoriesModels +############################################################################## +# region + + +class TestCreateCategoriesModel: + """ + Test Class for create_categories_model + """ + + @responses.activate + def test_create_categories_model_all_params(self): + """ + create_categories_model() + """ + # Set up mock + url = preprocess_url('/v1/models/categories') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + training_data_content_type = 'json' + name = 'testString' + user_metadata = {'region': 'North America', 'latest': True} + description = 'testString' + model_version = 'testString' + workspace_id = 'testString' + version_description = 'testString' + + # Invoke method + response = _service.create_categories_model( + language, + training_data, + training_data_content_type=training_data_content_type, + name=name, + user_metadata=user_metadata, + description=description, + model_version=model_version, + workspace_id=workspace_id, + version_description=version_description, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_create_categories_model_all_params_with_retries(self): + # Enable retries and run test_create_categories_model_all_params. 
+ _service.enable_retries() + self.test_create_categories_model_all_params() + + # Disable retries and run test_create_categories_model_all_params. + _service.disable_retries() + self.test_create_categories_model_all_params() + + @responses.activate + def test_create_categories_model_required_params(self): + """ + test_create_categories_model_required_params() + """ + # Set up mock + url = preprocess_url('/v1/models/categories') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.create_categories_model( + language, + training_data, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_create_categories_model_required_params_with_retries(self): + # Enable retries and run test_create_categories_model_required_params. + _service.enable_retries() + self.test_create_categories_model_required_params() + + # Disable retries and run test_create_categories_model_required_params. + _service.disable_retries() + self.test_create_categories_model_required_params() + + @responses.activate + def test_create_categories_model_value_error(self): + """ + test_create_categories_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/categories') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "language": language, + "training_data": training_data, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_categories_model(**req_copy) + + def test_create_categories_model_value_error_with_retries(self): + # Enable retries and run test_create_categories_model_value_error. + _service.enable_retries() + self.test_create_categories_model_value_error() + + # Disable retries and run test_create_categories_model_value_error. 
+ _service.disable_retries() + self.test_create_categories_model_value_error() + + +class TestListCategoriesModels: + """ + Test Class for list_categories_models + """ + + @responses.activate + def test_list_categories_models_all_params(self): + """ + list_categories_models() + """ + # Set up mock + url = preprocess_url('/v1/models/categories') + mock_response = '{"models": [{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_categories_models() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_categories_models_all_params_with_retries(self): + # Enable retries and run test_list_categories_models_all_params. + _service.enable_retries() + self.test_list_categories_models_all_params() + + # Disable retries and run test_list_categories_models_all_params. + _service.disable_retries() + self.test_list_categories_models_all_params() + + @responses.activate + def test_list_categories_models_value_error(self): + """ + test_list_categories_models_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/categories') + mock_response = '{"models": [{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_categories_models(**req_copy) + + def test_list_categories_models_value_error_with_retries(self): + # Enable retries and run test_list_categories_models_value_error. + _service.enable_retries() + self.test_list_categories_models_value_error() + + # Disable retries and run test_list_categories_models_value_error. 
+ _service.disable_retries() + self.test_list_categories_models_value_error() + + +class TestGetCategoriesModel: + """ + Test Class for get_categories_model + """ + + @responses.activate + def test_get_categories_model_all_params(self): + """ + get_categories_model() + """ + # Set up mock + url = preprocess_url('/v1/models/categories/testString') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + + # Invoke method + response = _service.get_categories_model( + model_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_categories_model_all_params_with_retries(self): + # Enable retries and run test_get_categories_model_all_params. + _service.enable_retries() + self.test_get_categories_model_all_params() + + # Disable retries and run test_get_categories_model_all_params. + _service.disable_retries() + self.test_get_categories_model_all_params() + + @responses.activate + def test_get_categories_model_value_error(self): + """ + test_get_categories_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/categories/testString') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "model_id": model_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_categories_model(**req_copy) + + def test_get_categories_model_value_error_with_retries(self): + # Enable retries and run test_get_categories_model_value_error. + _service.enable_retries() + self.test_get_categories_model_value_error() + + # Disable retries and run test_get_categories_model_value_error. 
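# The mocked model payloads above carry RFC 3339 timestamps such as
# "2019-01-01T12:00:00.000Z". The ibm_cloud_sdk_core helpers imported at the
# top of this test file convert between those strings and datetime objects; a
# quick sketch of the round trip (the exact output formatting is the helper's
# own and is checked here only loosely):
from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime

created = string_to_datetime('2019-01-01T12:00:00.000Z')
assert created.year == 2019 and created.hour == 12

round_tripped = datetime_to_string(created)
assert round_tripped.startswith('2019-01-01T12:00:00')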
+ _service.disable_retries() + self.test_get_categories_model_value_error() + + +class TestUpdateCategoriesModel: + """ + Test Class for update_categories_model + """ + + @responses.activate + def test_update_categories_model_all_params(self): + """ + update_categories_model() + """ + # Set up mock + url = preprocess_url('/v1/models/categories/testString') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.PUT, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + training_data_content_type = 'json' + name = 'testString' + user_metadata = {'region': 'North America', 'latest': True} + description = 'testString' + model_version = 'testString' + workspace_id = 'testString' + version_description = 'testString' + + # Invoke method + response = _service.update_categories_model( + model_id, + language, + training_data, + training_data_content_type=training_data_content_type, + name=name, + user_metadata=user_metadata, + description=description, + model_version=model_version, + workspace_id=workspace_id, + version_description=version_description, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_update_categories_model_all_params_with_retries(self): + # Enable retries and run test_update_categories_model_all_params. + _service.enable_retries() + self.test_update_categories_model_all_params() + + # Disable retries and run test_update_categories_model_all_params. + _service.disable_retries() + self.test_update_categories_model_all_params() + + @responses.activate + def test_update_categories_model_required_params(self): + """ + test_update_categories_model_required_params() + """ + # Set up mock + url = preprocess_url('/v1/models/categories/testString') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.PUT, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.update_categories_model( + model_id, + language, + training_data, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_update_categories_model_required_params_with_retries(self): + # Enable retries and run test_update_categories_model_required_params. 
+ _service.enable_retries() + self.test_update_categories_model_required_params() + + # Disable retries and run test_update_categories_model_required_params. + _service.disable_retries() + self.test_update_categories_model_required_params() + + @responses.activate + def test_update_categories_model_value_error(self): + """ + test_update_categories_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/categories/testString') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.PUT, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "model_id": model_id, + "language": language, + "training_data": training_data, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_categories_model(**req_copy) + + def test_update_categories_model_value_error_with_retries(self): + # Enable retries and run test_update_categories_model_value_error. + _service.enable_retries() + self.test_update_categories_model_value_error() + + # Disable retries and run test_update_categories_model_value_error. + _service.disable_retries() + self.test_update_categories_model_value_error() + + +class TestDeleteCategoriesModel: + """ + Test Class for delete_categories_model + """ + + @responses.activate + def test_delete_categories_model_all_params(self): + """ + delete_categories_model() + """ + # Set up mock + url = preprocess_url('/v1/models/categories/testString') + mock_response = '{"deleted": "deleted"}' + responses.add( + responses.DELETE, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + + # Invoke method + response = _service.delete_categories_model( + model_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_categories_model_all_params_with_retries(self): + # Enable retries and run test_delete_categories_model_all_params. + _service.enable_retries() + self.test_delete_categories_model_all_params() + + # Disable retries and run test_delete_categories_model_all_params. 
+ _service.disable_retries() + self.test_delete_categories_model_all_params() + + @responses.activate + def test_delete_categories_model_value_error(self): + """ + test_delete_categories_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/categories/testString') + mock_response = '{"deleted": "deleted"}' + responses.add( + responses.DELETE, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "model_id": model_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_categories_model(**req_copy) + + def test_delete_categories_model_value_error_with_retries(self): + # Enable retries and run test_delete_categories_model_value_error. + _service.enable_retries() + self.test_delete_categories_model_value_error() + + # Disable retries and run test_delete_categories_model_value_error. + _service.disable_retries() + self.test_delete_categories_model_value_error() + + +# endregion +############################################################################## +# End of Service: ManageCategoriesModels +############################################################################## + +############################################################################## +# Start of Service: ManageClassificationsModels +############################################################################## +# region + + +class TestCreateClassificationsModel: + """ + Test Class for create_classifications_model + """ + + @responses.activate + def test_create_classifications_model_all_params(self): + """ + create_classifications_model() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a ClassificationsTrainingParameters model + classifications_training_parameters_model = {} + classifications_training_parameters_model['model_type'] = 'single_label' + + # Set up parameter values + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + training_data_content_type = 'json' + name = 'testString' + user_metadata = {'region': 'North America', 'latest': True} + description = 'testString' + model_version = 'testString' + workspace_id = 'testString' + version_description = 'testString' + training_parameters = classifications_training_parameters_model + + # Invoke method + response = _service.create_classifications_model( + language, + training_data, + training_data_content_type=training_data_content_type, + name=name, + user_metadata=user_metadata, + description=description, + model_version=model_version, + workspace_id=workspace_id, + version_description=version_description, + 
training_parameters=training_parameters, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_create_classifications_model_all_params_with_retries(self): + # Enable retries and run test_create_classifications_model_all_params. + _service.enable_retries() + self.test_create_classifications_model_all_params() + + # Disable retries and run test_create_classifications_model_all_params. + _service.disable_retries() + self.test_create_classifications_model_all_params() + + @responses.activate + def test_create_classifications_model_required_params(self): + """ + test_create_classifications_model_required_params() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.create_classifications_model( + language, + training_data, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_create_classifications_model_required_params_with_retries(self): + # Enable retries and run test_create_classifications_model_required_params. + _service.enable_retries() + self.test_create_classifications_model_required_params() + + # Disable retries and run test_create_classifications_model_required_params. 
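# The classifications-model tests above upload training data as raw bytes built
# from an in-memory buffer, plus a plain dict mirroring the
# ClassificationsTrainingParameters model. A small sketch of assembling such a
# payload; the CSV-style rows below are illustrative only, not the documented
# training format:
import io

rows = b'"How hot is it today?","temperature"\n"Is it raining?","conditions"\n'
training_data = io.BytesIO(rows).getvalue()  # plain bytes, as in the tests above
assert isinstance(training_data, bytes)

# Training options, e.g. requesting a single-label classifier:
classifications_training_parameters = {'model_type': 'single_label'}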
+ _service.disable_retries() + self.test_create_classifications_model_required_params() + + @responses.activate + def test_create_classifications_model_value_error(self): + """ + test_create_classifications_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "language": language, + "training_data": training_data, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_classifications_model(**req_copy) + + def test_create_classifications_model_value_error_with_retries(self): + # Enable retries and run test_create_classifications_model_value_error. + _service.enable_retries() + self.test_create_classifications_model_value_error() + + # Disable retries and run test_create_classifications_model_value_error. + _service.disable_retries() + self.test_create_classifications_model_value_error() + + +class TestListClassificationsModels: + """ + Test Class for list_classifications_models + """ + + @responses.activate + def test_list_classifications_models_all_params(self): + """ + list_classifications_models() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications') + mock_response = '{"models": [{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_classifications_models() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_classifications_models_all_params_with_retries(self): + # Enable retries and run test_list_classifications_models_all_params. + _service.enable_retries() + self.test_list_classifications_models_all_params() + + # Disable retries and run test_list_classifications_models_all_params. 
+ _service.disable_retries() + self.test_list_classifications_models_all_params() + + @responses.activate + def test_list_classifications_models_value_error(self): + """ + test_list_classifications_models_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications') + mock_response = '{"models": [{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_classifications_models(**req_copy) + + def test_list_classifications_models_value_error_with_retries(self): + # Enable retries and run test_list_classifications_models_value_error. + _service.enable_retries() + self.test_list_classifications_models_value_error() + + # Disable retries and run test_list_classifications_models_value_error. + _service.disable_retries() + self.test_list_classifications_models_value_error() + + +class TestGetClassificationsModel: + """ + Test Class for get_classifications_model + """ + + @responses.activate + def test_get_classifications_model_all_params(self): + """ + get_classifications_model() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications/testString') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + + # Invoke method + response = _service.get_classifications_model( + model_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_classifications_model_all_params_with_retries(self): + # Enable retries and run test_get_classifications_model_all_params. + _service.enable_retries() + self.test_get_classifications_model_all_params() + + # Disable retries and run test_get_classifications_model_all_params. 
+ _service.disable_retries() + self.test_get_classifications_model_all_params() + + @responses.activate + def test_get_classifications_model_value_error(self): + """ + test_get_classifications_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications/testString') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "model_id": model_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_classifications_model(**req_copy) + + def test_get_classifications_model_value_error_with_retries(self): + # Enable retries and run test_get_classifications_model_value_error. + _service.enable_retries() + self.test_get_classifications_model_value_error() + + # Disable retries and run test_get_classifications_model_value_error. + _service.disable_retries() + self.test_get_classifications_model_value_error() + + +class TestUpdateClassificationsModel: + """ + Test Class for update_classifications_model + """ + + @responses.activate + def test_update_classifications_model_all_params(self): + """ + update_classifications_model() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications/testString') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.PUT, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Construct a dict representation of a ClassificationsTrainingParameters model + classifications_training_parameters_model = {} + classifications_training_parameters_model['model_type'] = 'single_label' + + # Set up parameter values + model_id = 'testString' + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + training_data_content_type = 'json' + name = 'testString' + user_metadata = {'region': 'North America', 'latest': True} + description = 'testString' + model_version = 'testString' + workspace_id = 'testString' + version_description = 'testString' + training_parameters = classifications_training_parameters_model + + # Invoke method + response = _service.update_classifications_model( + model_id, + language, + training_data, + training_data_content_type=training_data_content_type, + name=name, + user_metadata=user_metadata, + description=description, + model_version=model_version, + workspace_id=workspace_id, + 
version_description=version_description, + training_parameters=training_parameters, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_update_classifications_model_all_params_with_retries(self): + # Enable retries and run test_update_classifications_model_all_params. + _service.enable_retries() + self.test_update_classifications_model_all_params() + + # Disable retries and run test_update_classifications_model_all_params. + _service.disable_retries() + self.test_update_classifications_model_all_params() + + @responses.activate + def test_update_classifications_model_required_params(self): + """ + test_update_classifications_model_required_params() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications/testString') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.PUT, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.update_classifications_model( + model_id, + language, + training_data, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_update_classifications_model_required_params_with_retries(self): + # Enable retries and run test_update_classifications_model_required_params. + _service.enable_retries() + self.test_update_classifications_model_required_params() + + # Disable retries and run test_update_classifications_model_required_params. 
+ _service.disable_retries() + self.test_update_classifications_model_required_params() + + @responses.activate + def test_update_classifications_model_value_error(self): + """ + test_update_classifications_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications/testString') + mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}' + responses.add( + responses.PUT, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + language = 'testString' + training_data = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "model_id": model_id, + "language": language, + "training_data": training_data, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_classifications_model(**req_copy) + + def test_update_classifications_model_value_error_with_retries(self): + # Enable retries and run test_update_classifications_model_value_error. + _service.enable_retries() + self.test_update_classifications_model_value_error() + + # Disable retries and run test_update_classifications_model_value_error. + _service.disable_retries() + self.test_update_classifications_model_value_error() + + +class TestDeleteClassificationsModel: + """ + Test Class for delete_classifications_model + """ + + @responses.activate + def test_delete_classifications_model_all_params(self): + """ + delete_classifications_model() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications/testString') + mock_response = '{"deleted": "deleted"}' + responses.add( + responses.DELETE, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + + # Invoke method + response = _service.delete_classifications_model( + model_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_classifications_model_all_params_with_retries(self): + # Enable retries and run test_delete_classifications_model_all_params. + _service.enable_retries() + self.test_delete_classifications_model_all_params() + + # Disable retries and run test_delete_classifications_model_all_params. 
+ _service.disable_retries() + self.test_delete_classifications_model_all_params() + + @responses.activate + def test_delete_classifications_model_value_error(self): + """ + test_delete_classifications_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/classifications/testString') + mock_response = '{"deleted": "deleted"}' + responses.add( + responses.DELETE, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "model_id": model_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_classifications_model(**req_copy) + + def test_delete_classifications_model_value_error_with_retries(self): + # Enable retries and run test_delete_classifications_model_value_error. + _service.enable_retries() + self.test_delete_classifications_model_value_error() + + # Disable retries and run test_delete_classifications_model_value_error. + _service.disable_retries() + self.test_delete_classifications_model_value_error() + + +# endregion +############################################################################## +# End of Service: ManageClassificationsModels +############################################################################## + + +############################################################################## +# Start of Model Tests +############################################################################## +# region + + +class TestModel_AnalysisResults: + """ + Test Class for AnalysisResults + """ + + def test_analysis_results_serialization(self): + """ + Test serialization/deserialization for AnalysisResults + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + analysis_results_usage_model = {} # AnalysisResultsUsage + analysis_results_usage_model['features'] = 38 + analysis_results_usage_model['text_characters'] = 38 + analysis_results_usage_model['text_units'] = 38 + + concepts_result_model = {} # ConceptsResult + concepts_result_model['text'] = 'Social network service' + concepts_result_model['relevance'] = 0.92186 + concepts_result_model['dbpedia_resource'] = 'http://dbpedia.org/resource/Social_network_service' + + entity_mention_model = {} # EntityMention + entity_mention_model['text'] = 'testString' + entity_mention_model['location'] = [38] + entity_mention_model['confidence'] = 72.5 + + emotion_scores_model = {} # EmotionScores + emotion_scores_model['anger'] = 72.5 + emotion_scores_model['disgust'] = 72.5 + emotion_scores_model['fear'] = 72.5 + emotion_scores_model['joy'] = 72.5 + emotion_scores_model['sadness'] = 72.5 + + feature_sentiment_results_model = {} # FeatureSentimentResults + feature_sentiment_results_model['score'] = 72.5 + + disambiguation_result_model = {} # DisambiguationResult + disambiguation_result_model['name'] = 'testString' + disambiguation_result_model['dbpedia_resource'] = 'testString' + disambiguation_result_model['subtype'] = ['testString'] + + entities_result_model = {} # EntitiesResult + entities_result_model['type'] = 'testString' + entities_result_model['text'] = 'Social network service' + entities_result_model['relevance'] = 0.92186 + entities_result_model['confidence'] = 72.5 + entities_result_model['mentions'] = [entity_mention_model] + entities_result_model['count'] = 38 + entities_result_model['emotion'] = emotion_scores_model + entities_result_model['sentiment'] = feature_sentiment_results_model + entities_result_model['disambiguation'] = disambiguation_result_model + + keywords_result_model = {} # KeywordsResult + keywords_result_model['count'] = 1 + keywords_result_model['relevance'] = 0.864624 + keywords_result_model['text'] = 'curated online courses' + keywords_result_model['emotion'] = emotion_scores_model + keywords_result_model['sentiment'] = feature_sentiment_results_model + + categories_relevant_text_model = {} # CategoriesRelevantText + categories_relevant_text_model['text'] = 'testString' + + categories_result_explanation_model = {} # CategoriesResultExplanation + categories_result_explanation_model['relevant_text'] = [categories_relevant_text_model] + + categories_result_model = {} # CategoriesResult + categories_result_model['label'] = '/technology and computing/computing/computer software and applications' + categories_result_model['score'] = 0.594296 + categories_result_model['explanation'] = categories_result_explanation_model + + classifications_result_model = {} # ClassificationsResult + classifications_result_model['class_name'] = 'temperature' + classifications_result_model['confidence'] = 0.562519 + + document_emotion_results_model = {} # DocumentEmotionResults + document_emotion_results_model['emotion'] = emotion_scores_model + + targeted_emotion_results_model = {} # TargetedEmotionResults + targeted_emotion_results_model['text'] = 'testString' + targeted_emotion_results_model['emotion'] = emotion_scores_model + + emotion_result_model = {} # EmotionResult + emotion_result_model['document'] = document_emotion_results_model + emotion_result_model['targets'] = [targeted_emotion_results_model] + + author_model = {} # Author + author_model['name'] = 'testString' + + feed_model = {} # Feed + feed_model['link'] = 'testString' + + features_results_metadata_model = {} # 
FeaturesResultsMetadata + features_results_metadata_model['authors'] = [author_model] + features_results_metadata_model['publication_date'] = 'testString' + features_results_metadata_model['title'] = 'testString' + features_results_metadata_model['image'] = 'testString' + features_results_metadata_model['feeds'] = [feed_model] + + relation_entity_model = {} # RelationEntity + relation_entity_model['text'] = 'Best Actor' + relation_entity_model['type'] = 'EntertainmentAward' + + relation_argument_model = {} # RelationArgument + relation_argument_model['entities'] = [relation_entity_model] + relation_argument_model['location'] = [22, 32] + relation_argument_model['text'] = 'Best Actor' + + relations_result_model = {} # RelationsResult + relations_result_model['score'] = 0.680715 + relations_result_model['sentence'] = 'Leonardo DiCaprio won Best Actor in a Leading Role for his performance.' + relations_result_model['type'] = 'awardedTo' + relations_result_model['arguments'] = [relation_argument_model] + + semantic_roles_entity_model = {} # SemanticRolesEntity + semantic_roles_entity_model['type'] = 'testString' + semantic_roles_entity_model['text'] = 'testString' + + semantic_roles_keyword_model = {} # SemanticRolesKeyword + semantic_roles_keyword_model['text'] = 'testString' + + semantic_roles_result_subject_model = {} # SemanticRolesResultSubject + semantic_roles_result_subject_model['text'] = 'IBM' + semantic_roles_result_subject_model['entities'] = [semantic_roles_entity_model] + semantic_roles_result_subject_model['keywords'] = [semantic_roles_keyword_model] + + semantic_roles_verb_model = {} # SemanticRolesVerb + semantic_roles_verb_model['text'] = 'have' + semantic_roles_verb_model['tense'] = 'present' + + semantic_roles_result_action_model = {} # SemanticRolesResultAction + semantic_roles_result_action_model['text'] = 'has' + semantic_roles_result_action_model['normalized'] = 'have' + semantic_roles_result_action_model['verb'] = semantic_roles_verb_model + + semantic_roles_result_object_model = {} # SemanticRolesResultObject + semantic_roles_result_object_model['text'] = 'one of the largest workforces in the world' + semantic_roles_result_object_model['keywords'] = [semantic_roles_keyword_model] + + semantic_roles_result_model = {} # SemanticRolesResult + semantic_roles_result_model['sentence'] = 'IBM has one of the largest workforces in the world' + semantic_roles_result_model['subject'] = semantic_roles_result_subject_model + semantic_roles_result_model['action'] = semantic_roles_result_action_model + semantic_roles_result_model['object'] = semantic_roles_result_object_model + + document_sentiment_results_model = {} # DocumentSentimentResults + document_sentiment_results_model['label'] = 'testString' + document_sentiment_results_model['score'] = 72.5 + + targeted_sentiment_results_model = {} # TargetedSentimentResults + targeted_sentiment_results_model['text'] = 'testString' + targeted_sentiment_results_model['score'] = 72.5 + + sentiment_result_model = {} # SentimentResult + sentiment_result_model['document'] = document_sentiment_results_model + sentiment_result_model['targets'] = [targeted_sentiment_results_model] + + token_result_model = {} # TokenResult + token_result_model['text'] = 'testString' + token_result_model['part_of_speech'] = 'ADJ' + token_result_model['location'] = [38] + token_result_model['lemma'] = 'testString' + + sentence_result_model = {} # SentenceResult + sentence_result_model['text'] = 'testString' + sentence_result_model['location'] = [38] + + 
syntax_result_model = {} # SyntaxResult + syntax_result_model['tokens'] = [token_result_model] + syntax_result_model['sentences'] = [sentence_result_model] + + # Construct a json representation of a AnalysisResults model + analysis_results_model_json = {} + analysis_results_model_json['language'] = 'testString' + analysis_results_model_json['analyzed_text'] = 'testString' + analysis_results_model_json['retrieved_url'] = 'testString' + analysis_results_model_json['usage'] = analysis_results_usage_model + analysis_results_model_json['concepts'] = [concepts_result_model] + analysis_results_model_json['entities'] = [entities_result_model] + analysis_results_model_json['keywords'] = [keywords_result_model] + analysis_results_model_json['categories'] = [categories_result_model] + analysis_results_model_json['classifications'] = [classifications_result_model] + analysis_results_model_json['emotion'] = emotion_result_model + analysis_results_model_json['metadata'] = features_results_metadata_model + analysis_results_model_json['relations'] = [relations_result_model] + analysis_results_model_json['semantic_roles'] = [semantic_roles_result_model] + analysis_results_model_json['sentiment'] = sentiment_result_model + analysis_results_model_json['syntax'] = syntax_result_model + + # Construct a model instance of AnalysisResults by calling from_dict on the json representation + analysis_results_model = AnalysisResults.from_dict(analysis_results_model_json) + assert analysis_results_model != False + + # Construct a model instance of AnalysisResults by calling from_dict on the json representation + analysis_results_model_dict = AnalysisResults.from_dict(analysis_results_model_json).__dict__ + analysis_results_model2 = AnalysisResults(**analysis_results_model_dict) + + # Verify the model instances are equivalent + assert analysis_results_model == analysis_results_model2 + + # Convert model instance back to dict and verify no loss of data + analysis_results_model_json2 = analysis_results_model.to_dict() + assert analysis_results_model_json2 == analysis_results_model_json + + +class TestModel_AnalysisResultsUsage: + """ + Test Class for AnalysisResultsUsage + """ + + def test_analysis_results_usage_serialization(self): + """ + Test serialization/deserialization for AnalysisResultsUsage + """ + + # Construct a json representation of a AnalysisResultsUsage model + analysis_results_usage_model_json = {} + analysis_results_usage_model_json['features'] = 38 + analysis_results_usage_model_json['text_characters'] = 38 + analysis_results_usage_model_json['text_units'] = 38 + + # Construct a model instance of AnalysisResultsUsage by calling from_dict on the json representation + analysis_results_usage_model = AnalysisResultsUsage.from_dict(analysis_results_usage_model_json) + assert analysis_results_usage_model != False + + # Construct a model instance of AnalysisResultsUsage by calling from_dict on the json representation + analysis_results_usage_model_dict = AnalysisResultsUsage.from_dict(analysis_results_usage_model_json).__dict__ + analysis_results_usage_model2 = AnalysisResultsUsage(**analysis_results_usage_model_dict) + + # Verify the model instances are equivalent + assert analysis_results_usage_model == analysis_results_usage_model2 + + # Convert model instance back to dict and verify no loss of data + analysis_results_usage_model_json2 = analysis_results_usage_model.to_dict() + assert analysis_results_usage_model_json2 == analysis_results_usage_model_json + + +class TestModel_Author: + """ + Test Class for 
Author + """ + + def test_author_serialization(self): + """ + Test serialization/deserialization for Author + """ + + # Construct a json representation of a Author model + author_model_json = {} + author_model_json['name'] = 'testString' + + # Construct a model instance of Author by calling from_dict on the json representation + author_model = Author.from_dict(author_model_json) + assert author_model != False + + # Construct a model instance of Author by calling from_dict on the json representation + author_model_dict = Author.from_dict(author_model_json).__dict__ + author_model2 = Author(**author_model_dict) + + # Verify the model instances are equivalent + assert author_model == author_model2 + + # Convert model instance back to dict and verify no loss of data + author_model_json2 = author_model.to_dict() + assert author_model_json2 == author_model_json + + +class TestModel_CategoriesModel: + """ + Test Class for CategoriesModel + """ + + def test_categories_model_serialization(self): + """ + Test serialization/deserialization for CategoriesModel + """ + + # Construct dict forms of any model objects needed in order to build this model. + + notice_model = {} # Notice + + # Construct a json representation of a CategoriesModel model + categories_model_model_json = {} + categories_model_model_json['name'] = 'testString' + categories_model_model_json['user_metadata'] = {'region': 'North America', 'latest': True} + categories_model_model_json['language'] = 'testString' + categories_model_model_json['description'] = 'testString' + categories_model_model_json['model_version'] = 'testString' + categories_model_model_json['workspace_id'] = 'testString' + categories_model_model_json['version_description'] = 'testString' + categories_model_model_json['features'] = ['testString'] + categories_model_model_json['status'] = 'starting' + categories_model_model_json['model_id'] = 'testString' + categories_model_model_json['created'] = '2019-01-01T12:00:00Z' + categories_model_model_json['notices'] = [notice_model] + categories_model_model_json['last_trained'] = '2019-01-01T12:00:00Z' + categories_model_model_json['last_deployed'] = '2019-01-01T12:00:00Z' + + # Construct a model instance of CategoriesModel by calling from_dict on the json representation + categories_model_model = CategoriesModel.from_dict(categories_model_model_json) + assert categories_model_model != False + + # Construct a model instance of CategoriesModel by calling from_dict on the json representation + categories_model_model_dict = CategoriesModel.from_dict(categories_model_model_json).__dict__ + categories_model_model2 = CategoriesModel(**categories_model_model_dict) + + # Verify the model instances are equivalent + assert categories_model_model == categories_model_model2 + + # Convert model instance back to dict and verify no loss of data + categories_model_model_json2 = categories_model_model.to_dict() + assert categories_model_model_json2 == categories_model_model_json + + +class TestModel_CategoriesModelList: + """ + Test Class for CategoriesModelList + """ + + def test_categories_model_list_serialization(self): + """ + Test serialization/deserialization for CategoriesModelList + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + notice_model = {} # Notice + + categories_model_model = {} # CategoriesModel + categories_model_model['name'] = 'testString' + categories_model_model['user_metadata'] = {'region': 'North America', 'latest': True} + categories_model_model['language'] = 'testString' + categories_model_model['description'] = 'testString' + categories_model_model['model_version'] = 'testString' + categories_model_model['workspace_id'] = 'testString' + categories_model_model['version_description'] = 'testString' + categories_model_model['features'] = ['testString'] + categories_model_model['status'] = 'starting' + categories_model_model['model_id'] = 'testString' + categories_model_model['created'] = '2019-01-01T12:00:00Z' + categories_model_model['notices'] = [notice_model] + categories_model_model['last_trained'] = '2019-01-01T12:00:00Z' + categories_model_model['last_deployed'] = '2019-01-01T12:00:00Z' + + # Construct a json representation of a CategoriesModelList model + categories_model_list_model_json = {} + categories_model_list_model_json['models'] = [categories_model_model] + + # Construct a model instance of CategoriesModelList by calling from_dict on the json representation + categories_model_list_model = CategoriesModelList.from_dict(categories_model_list_model_json) + assert categories_model_list_model != False + + # Construct a model instance of CategoriesModelList by calling from_dict on the json representation + categories_model_list_model_dict = CategoriesModelList.from_dict(categories_model_list_model_json).__dict__ + categories_model_list_model2 = CategoriesModelList(**categories_model_list_model_dict) + + # Verify the model instances are equivalent + assert categories_model_list_model == categories_model_list_model2 + + # Convert model instance back to dict and verify no loss of data + categories_model_list_model_json2 = categories_model_list_model.to_dict() + assert categories_model_list_model_json2 == categories_model_list_model_json + + +class TestModel_CategoriesOptions: + """ + Test Class for CategoriesOptions + """ + + def test_categories_options_serialization(self): + """ + Test serialization/deserialization for CategoriesOptions + """ + + # Construct a json representation of a CategoriesOptions model + categories_options_model_json = {} + categories_options_model_json['explanation'] = False + categories_options_model_json['limit'] = 3 + categories_options_model_json['model'] = 'testString' + + # Construct a model instance of CategoriesOptions by calling from_dict on the json representation + categories_options_model = CategoriesOptions.from_dict(categories_options_model_json) + assert categories_options_model != False + + # Construct a model instance of CategoriesOptions by calling from_dict on the json representation + categories_options_model_dict = CategoriesOptions.from_dict(categories_options_model_json).__dict__ + categories_options_model2 = CategoriesOptions(**categories_options_model_dict) + + # Verify the model instances are equivalent + assert categories_options_model == categories_options_model2 + + # Convert model instance back to dict and verify no loss of data + categories_options_model_json2 = categories_options_model.to_dict() + assert categories_options_model_json2 == categories_options_model_json + + +class TestModel_CategoriesRelevantText: + """ + Test Class for CategoriesRelevantText + """ + + def test_categories_relevant_text_serialization(self): + """ + Test serialization/deserialization for CategoriesRelevantText + """ + + # Construct a json representation 
of a CategoriesRelevantText model + categories_relevant_text_model_json = {} + categories_relevant_text_model_json['text'] = 'testString' + + # Construct a model instance of CategoriesRelevantText by calling from_dict on the json representation + categories_relevant_text_model = CategoriesRelevantText.from_dict(categories_relevant_text_model_json) + assert categories_relevant_text_model != False + + # Construct a model instance of CategoriesRelevantText by calling from_dict on the json representation + categories_relevant_text_model_dict = CategoriesRelevantText.from_dict(categories_relevant_text_model_json).__dict__ + categories_relevant_text_model2 = CategoriesRelevantText(**categories_relevant_text_model_dict) + + # Verify the model instances are equivalent + assert categories_relevant_text_model == categories_relevant_text_model2 + + # Convert model instance back to dict and verify no loss of data + categories_relevant_text_model_json2 = categories_relevant_text_model.to_dict() + assert categories_relevant_text_model_json2 == categories_relevant_text_model_json + + +class TestModel_CategoriesResult: + """ + Test Class for CategoriesResult + """ + + def test_categories_result_serialization(self): + """ + Test serialization/deserialization for CategoriesResult + """ + + # Construct dict forms of any model objects needed in order to build this model. + + categories_relevant_text_model = {} # CategoriesRelevantText + categories_relevant_text_model['text'] = 'testString' + + categories_result_explanation_model = {} # CategoriesResultExplanation + categories_result_explanation_model['relevant_text'] = [categories_relevant_text_model] + + # Construct a json representation of a CategoriesResult model + categories_result_model_json = {} + categories_result_model_json['label'] = 'testString' + categories_result_model_json['score'] = 72.5 + categories_result_model_json['explanation'] = categories_result_explanation_model + + # Construct a model instance of CategoriesResult by calling from_dict on the json representation + categories_result_model = CategoriesResult.from_dict(categories_result_model_json) + assert categories_result_model != False + + # Construct a model instance of CategoriesResult by calling from_dict on the json representation + categories_result_model_dict = CategoriesResult.from_dict(categories_result_model_json).__dict__ + categories_result_model2 = CategoriesResult(**categories_result_model_dict) + + # Verify the model instances are equivalent + assert categories_result_model == categories_result_model2 + + # Convert model instance back to dict and verify no loss of data + categories_result_model_json2 = categories_result_model.to_dict() + assert categories_result_model_json2 == categories_result_model_json + + +class TestModel_CategoriesResultExplanation: + """ + Test Class for CategoriesResultExplanation + """ + + def test_categories_result_explanation_serialization(self): + """ + Test serialization/deserialization for CategoriesResultExplanation + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + categories_relevant_text_model = {} # CategoriesRelevantText + categories_relevant_text_model['text'] = 'testString' + + # Construct a json representation of a CategoriesResultExplanation model + categories_result_explanation_model_json = {} + categories_result_explanation_model_json['relevant_text'] = [categories_relevant_text_model] + + # Construct a model instance of CategoriesResultExplanation by calling from_dict on the json representation + categories_result_explanation_model = CategoriesResultExplanation.from_dict(categories_result_explanation_model_json) + assert categories_result_explanation_model != False + + # Construct a model instance of CategoriesResultExplanation by calling from_dict on the json representation + categories_result_explanation_model_dict = CategoriesResultExplanation.from_dict(categories_result_explanation_model_json).__dict__ + categories_result_explanation_model2 = CategoriesResultExplanation(**categories_result_explanation_model_dict) + + # Verify the model instances are equivalent + assert categories_result_explanation_model == categories_result_explanation_model2 + + # Convert model instance back to dict and verify no loss of data + categories_result_explanation_model_json2 = categories_result_explanation_model.to_dict() + assert categories_result_explanation_model_json2 == categories_result_explanation_model_json + + +class TestModel_ClassificationsModel: + """ + Test Class for ClassificationsModel + """ + + def test_classifications_model_serialization(self): + """ + Test serialization/deserialization for ClassificationsModel + """ + + # Construct dict forms of any model objects needed in order to build this model. + + notice_model = {} # Notice + + # Construct a json representation of a ClassificationsModel model + classifications_model_model_json = {} + classifications_model_model_json['name'] = 'testString' + classifications_model_model_json['user_metadata'] = {'region': 'North America', 'latest': True} + classifications_model_model_json['language'] = 'testString' + classifications_model_model_json['description'] = 'testString' + classifications_model_model_json['model_version'] = 'testString' + classifications_model_model_json['workspace_id'] = 'testString' + classifications_model_model_json['version_description'] = 'testString' + classifications_model_model_json['features'] = ['testString'] + classifications_model_model_json['status'] = 'starting' + classifications_model_model_json['model_id'] = 'testString' + classifications_model_model_json['created'] = '2019-01-01T12:00:00Z' + classifications_model_model_json['notices'] = [notice_model] + classifications_model_model_json['last_trained'] = '2019-01-01T12:00:00Z' + classifications_model_model_json['last_deployed'] = '2019-01-01T12:00:00Z' + + # Construct a model instance of ClassificationsModel by calling from_dict on the json representation + classifications_model_model = ClassificationsModel.from_dict(classifications_model_model_json) + assert classifications_model_model != False + + # Construct a model instance of ClassificationsModel by calling from_dict on the json representation + classifications_model_model_dict = ClassificationsModel.from_dict(classifications_model_model_json).__dict__ + classifications_model_model2 = ClassificationsModel(**classifications_model_model_dict) + + # Verify the model instances are equivalent + assert classifications_model_model == classifications_model_model2 + + # Convert model instance back to dict and verify no loss of data + 
classifications_model_model_json2 = classifications_model_model.to_dict() + assert classifications_model_model_json2 == classifications_model_model_json + + +class TestModel_ClassificationsModelList: + """ + Test Class for ClassificationsModelList + """ + + def test_classifications_model_list_serialization(self): + """ + Test serialization/deserialization for ClassificationsModelList + """ + + # Construct dict forms of any model objects needed in order to build this model. + + notice_model = {} # Notice + + classifications_model_model = {} # ClassificationsModel + classifications_model_model['name'] = 'testString' + classifications_model_model['user_metadata'] = {'region': 'North America', 'latest': True} + classifications_model_model['language'] = 'testString' + classifications_model_model['description'] = 'testString' + classifications_model_model['model_version'] = 'testString' + classifications_model_model['workspace_id'] = 'testString' + classifications_model_model['version_description'] = 'testString' + classifications_model_model['features'] = ['testString'] + classifications_model_model['status'] = 'starting' + classifications_model_model['model_id'] = 'testString' + classifications_model_model['created'] = '2019-01-01T12:00:00Z' + classifications_model_model['notices'] = [notice_model] + classifications_model_model['last_trained'] = '2019-01-01T12:00:00Z' + classifications_model_model['last_deployed'] = '2019-01-01T12:00:00Z' + + # Construct a json representation of a ClassificationsModelList model + classifications_model_list_model_json = {} + classifications_model_list_model_json['models'] = [classifications_model_model] + + # Construct a model instance of ClassificationsModelList by calling from_dict on the json representation + classifications_model_list_model = ClassificationsModelList.from_dict(classifications_model_list_model_json) + assert classifications_model_list_model != False + + # Construct a model instance of ClassificationsModelList by calling from_dict on the json representation + classifications_model_list_model_dict = ClassificationsModelList.from_dict(classifications_model_list_model_json).__dict__ + classifications_model_list_model2 = ClassificationsModelList(**classifications_model_list_model_dict) + + # Verify the model instances are equivalent + assert classifications_model_list_model == classifications_model_list_model2 + + # Convert model instance back to dict and verify no loss of data + classifications_model_list_model_json2 = classifications_model_list_model.to_dict() + assert classifications_model_list_model_json2 == classifications_model_list_model_json + + +class TestModel_ClassificationsOptions: + """ + Test Class for ClassificationsOptions + """ + + def test_classifications_options_serialization(self): + """ + Test serialization/deserialization for ClassificationsOptions + """ + + # Construct a json representation of a ClassificationsOptions model + classifications_options_model_json = {} + classifications_options_model_json['model'] = 'testString' + + # Construct a model instance of ClassificationsOptions by calling from_dict on the json representation + classifications_options_model = ClassificationsOptions.from_dict(classifications_options_model_json) + assert classifications_options_model != False + + # Construct a model instance of ClassificationsOptions by calling from_dict on the json representation + classifications_options_model_dict = ClassificationsOptions.from_dict(classifications_options_model_json).__dict__ + 
classifications_options_model2 = ClassificationsOptions(**classifications_options_model_dict) + + # Verify the model instances are equivalent + assert classifications_options_model == classifications_options_model2 + + # Convert model instance back to dict and verify no loss of data + classifications_options_model_json2 = classifications_options_model.to_dict() + assert classifications_options_model_json2 == classifications_options_model_json + + +class TestModel_ClassificationsResult: + """ + Test Class for ClassificationsResult + """ + + def test_classifications_result_serialization(self): + """ + Test serialization/deserialization for ClassificationsResult + """ + + # Construct a json representation of a ClassificationsResult model + classifications_result_model_json = {} + classifications_result_model_json['class_name'] = 'testString' + classifications_result_model_json['confidence'] = 72.5 + + # Construct a model instance of ClassificationsResult by calling from_dict on the json representation + classifications_result_model = ClassificationsResult.from_dict(classifications_result_model_json) + assert classifications_result_model != False + + # Construct a model instance of ClassificationsResult by calling from_dict on the json representation + classifications_result_model_dict = ClassificationsResult.from_dict(classifications_result_model_json).__dict__ + classifications_result_model2 = ClassificationsResult(**classifications_result_model_dict) + + # Verify the model instances are equivalent + assert classifications_result_model == classifications_result_model2 + + # Convert model instance back to dict and verify no loss of data + classifications_result_model_json2 = classifications_result_model.to_dict() + assert classifications_result_model_json2 == classifications_result_model_json + + +class TestModel_ClassificationsTrainingParameters: + """ + Test Class for ClassificationsTrainingParameters + """ + + def test_classifications_training_parameters_serialization(self): + """ + Test serialization/deserialization for ClassificationsTrainingParameters + """ + + # Construct a json representation of a ClassificationsTrainingParameters model + classifications_training_parameters_model_json = {} + classifications_training_parameters_model_json['model_type'] = 'single_label' + + # Construct a model instance of ClassificationsTrainingParameters by calling from_dict on the json representation + classifications_training_parameters_model = ClassificationsTrainingParameters.from_dict(classifications_training_parameters_model_json) + assert classifications_training_parameters_model != False + + # Construct a model instance of ClassificationsTrainingParameters by calling from_dict on the json representation + classifications_training_parameters_model_dict = ClassificationsTrainingParameters.from_dict(classifications_training_parameters_model_json).__dict__ + classifications_training_parameters_model2 = ClassificationsTrainingParameters(**classifications_training_parameters_model_dict) + + # Verify the model instances are equivalent + assert classifications_training_parameters_model == classifications_training_parameters_model2 + + # Convert model instance back to dict and verify no loss of data + classifications_training_parameters_model_json2 = classifications_training_parameters_model.to_dict() + assert classifications_training_parameters_model_json2 == classifications_training_parameters_model_json + + +class TestModel_ConceptsOptions: + """ + Test Class for ConceptsOptions + """ + + def 
test_concepts_options_serialization(self): + """ + Test serialization/deserialization for ConceptsOptions + """ + + # Construct a json representation of a ConceptsOptions model + concepts_options_model_json = {} + concepts_options_model_json['limit'] = 8 + + # Construct a model instance of ConceptsOptions by calling from_dict on the json representation + concepts_options_model = ConceptsOptions.from_dict(concepts_options_model_json) + assert concepts_options_model != False + + # Construct a model instance of ConceptsOptions by calling from_dict on the json representation + concepts_options_model_dict = ConceptsOptions.from_dict(concepts_options_model_json).__dict__ + concepts_options_model2 = ConceptsOptions(**concepts_options_model_dict) + + # Verify the model instances are equivalent + assert concepts_options_model == concepts_options_model2 + + # Convert model instance back to dict and verify no loss of data + concepts_options_model_json2 = concepts_options_model.to_dict() + assert concepts_options_model_json2 == concepts_options_model_json + + +class TestModel_ConceptsResult: + """ + Test Class for ConceptsResult + """ + + def test_concepts_result_serialization(self): + """ + Test serialization/deserialization for ConceptsResult + """ + + # Construct a json representation of a ConceptsResult model + concepts_result_model_json = {} + concepts_result_model_json['text'] = 'testString' + concepts_result_model_json['relevance'] = 72.5 + concepts_result_model_json['dbpedia_resource'] = 'testString' + + # Construct a model instance of ConceptsResult by calling from_dict on the json representation + concepts_result_model = ConceptsResult.from_dict(concepts_result_model_json) + assert concepts_result_model != False + + # Construct a model instance of ConceptsResult by calling from_dict on the json representation + concepts_result_model_dict = ConceptsResult.from_dict(concepts_result_model_json).__dict__ + concepts_result_model2 = ConceptsResult(**concepts_result_model_dict) + + # Verify the model instances are equivalent + assert concepts_result_model == concepts_result_model2 + + # Convert model instance back to dict and verify no loss of data + concepts_result_model_json2 = concepts_result_model.to_dict() + assert concepts_result_model_json2 == concepts_result_model_json + + +class TestModel_DeleteModelResults: + """ + Test Class for DeleteModelResults + """ + + def test_delete_model_results_serialization(self): + """ + Test serialization/deserialization for DeleteModelResults + """ + + # Construct a json representation of a DeleteModelResults model + delete_model_results_model_json = {} + delete_model_results_model_json['deleted'] = 'testString' + + # Construct a model instance of DeleteModelResults by calling from_dict on the json representation + delete_model_results_model = DeleteModelResults.from_dict(delete_model_results_model_json) + assert delete_model_results_model != False + + # Construct a model instance of DeleteModelResults by calling from_dict on the json representation + delete_model_results_model_dict = DeleteModelResults.from_dict(delete_model_results_model_json).__dict__ + delete_model_results_model2 = DeleteModelResults(**delete_model_results_model_dict) + + # Verify the model instances are equivalent + assert delete_model_results_model == delete_model_results_model2 + + # Convert model instance back to dict and verify no loss of data + delete_model_results_model_json2 = delete_model_results_model.to_dict() + assert delete_model_results_model_json2 == 
delete_model_results_model_json + + +class TestModel_DisambiguationResult: + """ + Test Class for DisambiguationResult + """ + + def test_disambiguation_result_serialization(self): + """ + Test serialization/deserialization for DisambiguationResult + """ + + # Construct a json representation of a DisambiguationResult model + disambiguation_result_model_json = {} + disambiguation_result_model_json['name'] = 'testString' + disambiguation_result_model_json['dbpedia_resource'] = 'testString' + disambiguation_result_model_json['subtype'] = ['testString'] + + # Construct a model instance of DisambiguationResult by calling from_dict on the json representation + disambiguation_result_model = DisambiguationResult.from_dict(disambiguation_result_model_json) + assert disambiguation_result_model != False + + # Construct a model instance of DisambiguationResult by calling from_dict on the json representation + disambiguation_result_model_dict = DisambiguationResult.from_dict(disambiguation_result_model_json).__dict__ + disambiguation_result_model2 = DisambiguationResult(**disambiguation_result_model_dict) + + # Verify the model instances are equivalent + assert disambiguation_result_model == disambiguation_result_model2 + + # Convert model instance back to dict and verify no loss of data + disambiguation_result_model_json2 = disambiguation_result_model.to_dict() + assert disambiguation_result_model_json2 == disambiguation_result_model_json + + +class TestModel_DocumentEmotionResults: + """ + Test Class for DocumentEmotionResults + """ + + def test_document_emotion_results_serialization(self): + """ + Test serialization/deserialization for DocumentEmotionResults + """ + + # Construct dict forms of any model objects needed in order to build this model. + + emotion_scores_model = {} # EmotionScores + emotion_scores_model['anger'] = 72.5 + emotion_scores_model['disgust'] = 72.5 + emotion_scores_model['fear'] = 72.5 + emotion_scores_model['joy'] = 72.5 + emotion_scores_model['sadness'] = 72.5 + + # Construct a json representation of a DocumentEmotionResults model + document_emotion_results_model_json = {} + document_emotion_results_model_json['emotion'] = emotion_scores_model + + # Construct a model instance of DocumentEmotionResults by calling from_dict on the json representation + document_emotion_results_model = DocumentEmotionResults.from_dict(document_emotion_results_model_json) + assert document_emotion_results_model != False + + # Construct a model instance of DocumentEmotionResults by calling from_dict on the json representation + document_emotion_results_model_dict = DocumentEmotionResults.from_dict(document_emotion_results_model_json).__dict__ + document_emotion_results_model2 = DocumentEmotionResults(**document_emotion_results_model_dict) + + # Verify the model instances are equivalent + assert document_emotion_results_model == document_emotion_results_model2 + + # Convert model instance back to dict and verify no loss of data + document_emotion_results_model_json2 = document_emotion_results_model.to_dict() + assert document_emotion_results_model_json2 == document_emotion_results_model_json + + +class TestModel_DocumentSentimentResults: + """ + Test Class for DocumentSentimentResults + """ + + def test_document_sentiment_results_serialization(self): + """ + Test serialization/deserialization for DocumentSentimentResults + """ + + # Construct a json representation of a DocumentSentimentResults model + document_sentiment_results_model_json = {} + document_sentiment_results_model_json['label'] = 
'testString' + document_sentiment_results_model_json['score'] = 72.5 + + # Construct a model instance of DocumentSentimentResults by calling from_dict on the json representation + document_sentiment_results_model = DocumentSentimentResults.from_dict(document_sentiment_results_model_json) + assert document_sentiment_results_model != False + + # Construct a model instance of DocumentSentimentResults by calling from_dict on the json representation + document_sentiment_results_model_dict = DocumentSentimentResults.from_dict(document_sentiment_results_model_json).__dict__ + document_sentiment_results_model2 = DocumentSentimentResults(**document_sentiment_results_model_dict) + + # Verify the model instances are equivalent + assert document_sentiment_results_model == document_sentiment_results_model2 + + # Convert model instance back to dict and verify no loss of data + document_sentiment_results_model_json2 = document_sentiment_results_model.to_dict() + assert document_sentiment_results_model_json2 == document_sentiment_results_model_json + + +class TestModel_EmotionOptions: + """ + Test Class for EmotionOptions + """ + + def test_emotion_options_serialization(self): + """ + Test serialization/deserialization for EmotionOptions + """ + + # Construct a json representation of a EmotionOptions model + emotion_options_model_json = {} + emotion_options_model_json['document'] = True + emotion_options_model_json['targets'] = ['testString'] + + # Construct a model instance of EmotionOptions by calling from_dict on the json representation + emotion_options_model = EmotionOptions.from_dict(emotion_options_model_json) + assert emotion_options_model != False + + # Construct a model instance of EmotionOptions by calling from_dict on the json representation + emotion_options_model_dict = EmotionOptions.from_dict(emotion_options_model_json).__dict__ + emotion_options_model2 = EmotionOptions(**emotion_options_model_dict) + + # Verify the model instances are equivalent + assert emotion_options_model == emotion_options_model2 + + # Convert model instance back to dict and verify no loss of data + emotion_options_model_json2 = emotion_options_model.to_dict() + assert emotion_options_model_json2 == emotion_options_model_json + + +class TestModel_EmotionResult: + """ + Test Class for EmotionResult + """ + + def test_emotion_result_serialization(self): + """ + Test serialization/deserialization for EmotionResult + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + emotion_scores_model = {} # EmotionScores + emotion_scores_model['anger'] = 0.041796 + emotion_scores_model['disgust'] = 0.022637 + emotion_scores_model['fear'] = 0.033387 + emotion_scores_model['joy'] = 0.563273 + emotion_scores_model['sadness'] = 0.32665 + + document_emotion_results_model = {} # DocumentEmotionResults + document_emotion_results_model['emotion'] = emotion_scores_model + + targeted_emotion_results_model = {} # TargetedEmotionResults + targeted_emotion_results_model['text'] = 'apples' + targeted_emotion_results_model['emotion'] = emotion_scores_model + + # Construct a json representation of a EmotionResult model + emotion_result_model_json = {} + emotion_result_model_json['document'] = document_emotion_results_model + emotion_result_model_json['targets'] = [targeted_emotion_results_model] + + # Construct a model instance of EmotionResult by calling from_dict on the json representation + emotion_result_model = EmotionResult.from_dict(emotion_result_model_json) + assert emotion_result_model != False + + # Construct a model instance of EmotionResult by calling from_dict on the json representation + emotion_result_model_dict = EmotionResult.from_dict(emotion_result_model_json).__dict__ + emotion_result_model2 = EmotionResult(**emotion_result_model_dict) + + # Verify the model instances are equivalent + assert emotion_result_model == emotion_result_model2 + + # Convert model instance back to dict and verify no loss of data + emotion_result_model_json2 = emotion_result_model.to_dict() + assert emotion_result_model_json2 == emotion_result_model_json + + +class TestModel_EmotionScores: + """ + Test Class for EmotionScores + """ + + def test_emotion_scores_serialization(self): + """ + Test serialization/deserialization for EmotionScores + """ + + # Construct a json representation of a EmotionScores model + emotion_scores_model_json = {} + emotion_scores_model_json['anger'] = 72.5 + emotion_scores_model_json['disgust'] = 72.5 + emotion_scores_model_json['fear'] = 72.5 + emotion_scores_model_json['joy'] = 72.5 + emotion_scores_model_json['sadness'] = 72.5 + + # Construct a model instance of EmotionScores by calling from_dict on the json representation + emotion_scores_model = EmotionScores.from_dict(emotion_scores_model_json) + assert emotion_scores_model != False + + # Construct a model instance of EmotionScores by calling from_dict on the json representation + emotion_scores_model_dict = EmotionScores.from_dict(emotion_scores_model_json).__dict__ + emotion_scores_model2 = EmotionScores(**emotion_scores_model_dict) + + # Verify the model instances are equivalent + assert emotion_scores_model == emotion_scores_model2 + + # Convert model instance back to dict and verify no loss of data + emotion_scores_model_json2 = emotion_scores_model.to_dict() + assert emotion_scores_model_json2 == emotion_scores_model_json + + +class TestModel_EntitiesOptions: + """ + Test Class for EntitiesOptions + """ + + def test_entities_options_serialization(self): + """ + Test serialization/deserialization for EntitiesOptions + """ + + # Construct a json representation of a EntitiesOptions model + entities_options_model_json = {} + entities_options_model_json['limit'] = 50 + entities_options_model_json['mentions'] = False + entities_options_model_json['model'] = 'testString' + entities_options_model_json['sentiment'] = False + entities_options_model_json['emotion'] = False + + # Construct a model instance of EntitiesOptions by calling from_dict on the json representation + entities_options_model = 
EntitiesOptions.from_dict(entities_options_model_json) + assert entities_options_model != False + + # Construct a model instance of EntitiesOptions by calling from_dict on the json representation + entities_options_model_dict = EntitiesOptions.from_dict(entities_options_model_json).__dict__ + entities_options_model2 = EntitiesOptions(**entities_options_model_dict) + + # Verify the model instances are equivalent + assert entities_options_model == entities_options_model2 + + # Convert model instance back to dict and verify no loss of data + entities_options_model_json2 = entities_options_model.to_dict() + assert entities_options_model_json2 == entities_options_model_json + + +class TestModel_EntitiesResult: + """ + Test Class for EntitiesResult + """ + + def test_entities_result_serialization(self): + """ + Test serialization/deserialization for EntitiesResult + """ + + # Construct dict forms of any model objects needed in order to build this model. + + entity_mention_model = {} # EntityMention + entity_mention_model['text'] = 'testString' + entity_mention_model['location'] = [38] + entity_mention_model['confidence'] = 72.5 + + emotion_scores_model = {} # EmotionScores + emotion_scores_model['anger'] = 72.5 + emotion_scores_model['disgust'] = 72.5 + emotion_scores_model['fear'] = 72.5 + emotion_scores_model['joy'] = 72.5 + emotion_scores_model['sadness'] = 72.5 + + feature_sentiment_results_model = {} # FeatureSentimentResults + feature_sentiment_results_model['score'] = 72.5 + + disambiguation_result_model = {} # DisambiguationResult + disambiguation_result_model['name'] = 'testString' + disambiguation_result_model['dbpedia_resource'] = 'testString' + disambiguation_result_model['subtype'] = ['testString'] + + # Construct a json representation of a EntitiesResult model + entities_result_model_json = {} + entities_result_model_json['type'] = 'testString' + entities_result_model_json['text'] = 'testString' + entities_result_model_json['relevance'] = 72.5 + entities_result_model_json['confidence'] = 72.5 + entities_result_model_json['mentions'] = [entity_mention_model] + entities_result_model_json['count'] = 38 + entities_result_model_json['emotion'] = emotion_scores_model + entities_result_model_json['sentiment'] = feature_sentiment_results_model + entities_result_model_json['disambiguation'] = disambiguation_result_model + + # Construct a model instance of EntitiesResult by calling from_dict on the json representation + entities_result_model = EntitiesResult.from_dict(entities_result_model_json) + assert entities_result_model != False + + # Construct a model instance of EntitiesResult by calling from_dict on the json representation + entities_result_model_dict = EntitiesResult.from_dict(entities_result_model_json).__dict__ + entities_result_model2 = EntitiesResult(**entities_result_model_dict) + + # Verify the model instances are equivalent + assert entities_result_model == entities_result_model2 + + # Convert model instance back to dict and verify no loss of data + entities_result_model_json2 = entities_result_model.to_dict() + assert entities_result_model_json2 == entities_result_model_json + + +class TestModel_EntityMention: + """ + Test Class for EntityMention + """ + + def test_entity_mention_serialization(self): + """ + Test serialization/deserialization for EntityMention + """ + + # Construct a json representation of a EntityMention model + entity_mention_model_json = {} + entity_mention_model_json['text'] = 'testString' + entity_mention_model_json['location'] = [38] + 
entity_mention_model_json['confidence'] = 72.5 + + # Construct a model instance of EntityMention by calling from_dict on the json representation + entity_mention_model = EntityMention.from_dict(entity_mention_model_json) + assert entity_mention_model != False + + # Construct a model instance of EntityMention by calling from_dict on the json representation + entity_mention_model_dict = EntityMention.from_dict(entity_mention_model_json).__dict__ + entity_mention_model2 = EntityMention(**entity_mention_model_dict) + + # Verify the model instances are equivalent + assert entity_mention_model == entity_mention_model2 + + # Convert model instance back to dict and verify no loss of data + entity_mention_model_json2 = entity_mention_model.to_dict() + assert entity_mention_model_json2 == entity_mention_model_json + + +class TestModel_FeatureSentimentResults: + """ + Test Class for FeatureSentimentResults + """ + + def test_feature_sentiment_results_serialization(self): + """ + Test serialization/deserialization for FeatureSentimentResults + """ + + # Construct a json representation of a FeatureSentimentResults model + feature_sentiment_results_model_json = {} + feature_sentiment_results_model_json['score'] = 72.5 + + # Construct a model instance of FeatureSentimentResults by calling from_dict on the json representation + feature_sentiment_results_model = FeatureSentimentResults.from_dict(feature_sentiment_results_model_json) + assert feature_sentiment_results_model != False + + # Construct a model instance of FeatureSentimentResults by calling from_dict on the json representation + feature_sentiment_results_model_dict = FeatureSentimentResults.from_dict(feature_sentiment_results_model_json).__dict__ + feature_sentiment_results_model2 = FeatureSentimentResults(**feature_sentiment_results_model_dict) + + # Verify the model instances are equivalent + assert feature_sentiment_results_model == feature_sentiment_results_model2 + + # Convert model instance back to dict and verify no loss of data + feature_sentiment_results_model_json2 = feature_sentiment_results_model.to_dict() + assert feature_sentiment_results_model_json2 == feature_sentiment_results_model_json + + +class TestModel_Features: + """ + Test Class for Features + """ + + def test_features_serialization(self): + """ + Test serialization/deserialization for Features + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + classifications_options_model = {} # ClassificationsOptions + classifications_options_model['model'] = 'testString' + + concepts_options_model = {} # ConceptsOptions + concepts_options_model['limit'] = 8 + + emotion_options_model = {} # EmotionOptions + emotion_options_model['document'] = True + emotion_options_model['targets'] = ['testString'] + + entities_options_model = {} # EntitiesOptions + entities_options_model['limit'] = 50 + entities_options_model['mentions'] = False + entities_options_model['model'] = 'testString' + entities_options_model['sentiment'] = False + entities_options_model['emotion'] = False + + keywords_options_model = {} # KeywordsOptions + keywords_options_model['limit'] = 50 + keywords_options_model['sentiment'] = False + keywords_options_model['emotion'] = False + + relations_options_model = {} # RelationsOptions + relations_options_model['model'] = 'testString' + + semantic_roles_options_model = {} # SemanticRolesOptions + semantic_roles_options_model['limit'] = 50 + semantic_roles_options_model['keywords'] = False + semantic_roles_options_model['entities'] = False + + sentiment_options_model = {} # SentimentOptions + sentiment_options_model['document'] = True + sentiment_options_model['targets'] = ['testString'] + + categories_options_model = {} # CategoriesOptions + categories_options_model['explanation'] = False + categories_options_model['limit'] = 3 + categories_options_model['model'] = 'testString' + + syntax_options_tokens_model = {} # SyntaxOptionsTokens + syntax_options_tokens_model['lemma'] = True + syntax_options_tokens_model['part_of_speech'] = True + + syntax_options_model = {} # SyntaxOptions + syntax_options_model['tokens'] = syntax_options_tokens_model + syntax_options_model['sentences'] = True + + # Construct a json representation of a Features model + features_model_json = {} + features_model_json['classifications'] = classifications_options_model + features_model_json['concepts'] = concepts_options_model + features_model_json['emotion'] = emotion_options_model + features_model_json['entities'] = entities_options_model + features_model_json['keywords'] = keywords_options_model + features_model_json['metadata'] = {'anyKey': 'anyValue'} + features_model_json['relations'] = relations_options_model + features_model_json['semantic_roles'] = semantic_roles_options_model + features_model_json['sentiment'] = sentiment_options_model + features_model_json['categories'] = categories_options_model + features_model_json['syntax'] = syntax_options_model + + # Construct a model instance of Features by calling from_dict on the json representation + features_model = Features.from_dict(features_model_json) + assert features_model != False + + # Construct a model instance of Features by calling from_dict on the json representation + features_model_dict = Features.from_dict(features_model_json).__dict__ + features_model2 = Features(**features_model_dict) + + # Verify the model instances are equivalent + assert features_model == features_model2 + + # Convert model instance back to dict and verify no loss of data + features_model_json2 = features_model.to_dict() + assert features_model_json2 == features_model_json + + +class TestModel_FeaturesResultsMetadata: + """ + Test Class for FeaturesResultsMetadata + """ + + def test_features_results_metadata_serialization(self): + """ + Test serialization/deserialization for FeaturesResultsMetadata + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + author_model = {} # Author + author_model['name'] = 'testString' + + feed_model = {} # Feed + feed_model['link'] = 'testString' + + # Construct a json representation of a FeaturesResultsMetadata model + features_results_metadata_model_json = {} + features_results_metadata_model_json['authors'] = [author_model] + features_results_metadata_model_json['publication_date'] = 'testString' + features_results_metadata_model_json['title'] = 'testString' + features_results_metadata_model_json['image'] = 'testString' + features_results_metadata_model_json['feeds'] = [feed_model] + + # Construct a model instance of FeaturesResultsMetadata by calling from_dict on the json representation + features_results_metadata_model = FeaturesResultsMetadata.from_dict(features_results_metadata_model_json) + assert features_results_metadata_model != False + + # Construct a model instance of FeaturesResultsMetadata by calling from_dict on the json representation + features_results_metadata_model_dict = FeaturesResultsMetadata.from_dict(features_results_metadata_model_json).__dict__ + features_results_metadata_model2 = FeaturesResultsMetadata(**features_results_metadata_model_dict) + + # Verify the model instances are equivalent + assert features_results_metadata_model == features_results_metadata_model2 + + # Convert model instance back to dict and verify no loss of data + features_results_metadata_model_json2 = features_results_metadata_model.to_dict() + assert features_results_metadata_model_json2 == features_results_metadata_model_json + + +class TestModel_Feed: + """ + Test Class for Feed + """ + + def test_feed_serialization(self): + """ + Test serialization/deserialization for Feed + """ + + # Construct a json representation of a Feed model + feed_model_json = {} + feed_model_json['link'] = 'testString' + + # Construct a model instance of Feed by calling from_dict on the json representation + feed_model = Feed.from_dict(feed_model_json) + assert feed_model != False + + # Construct a model instance of Feed by calling from_dict on the json representation + feed_model_dict = Feed.from_dict(feed_model_json).__dict__ + feed_model2 = Feed(**feed_model_dict) + + # Verify the model instances are equivalent + assert feed_model == feed_model2 + + # Convert model instance back to dict and verify no loss of data + feed_model_json2 = feed_model.to_dict() + assert feed_model_json2 == feed_model_json + + +class TestModel_KeywordsOptions: + """ + Test Class for KeywordsOptions + """ + + def test_keywords_options_serialization(self): + """ + Test serialization/deserialization for KeywordsOptions + """ + + # Construct a json representation of a KeywordsOptions model + keywords_options_model_json = {} + keywords_options_model_json['limit'] = 50 + keywords_options_model_json['sentiment'] = False + keywords_options_model_json['emotion'] = False + + # Construct a model instance of KeywordsOptions by calling from_dict on the json representation + keywords_options_model = KeywordsOptions.from_dict(keywords_options_model_json) + assert keywords_options_model != False + + # Construct a model instance of KeywordsOptions by calling from_dict on the json representation + keywords_options_model_dict = KeywordsOptions.from_dict(keywords_options_model_json).__dict__ + keywords_options_model2 = KeywordsOptions(**keywords_options_model_dict) + + # Verify the model instances are equivalent + assert keywords_options_model == keywords_options_model2 + + # Convert model instance back to dict and verify no loss of data + 
keywords_options_model_json2 = keywords_options_model.to_dict() + assert keywords_options_model_json2 == keywords_options_model_json + + +class TestModel_KeywordsResult: + """ + Test Class for KeywordsResult + """ + + def test_keywords_result_serialization(self): + """ + Test serialization/deserialization for KeywordsResult + """ + + # Construct dict forms of any model objects needed in order to build this model. + + emotion_scores_model = {} # EmotionScores + emotion_scores_model['anger'] = 72.5 + emotion_scores_model['disgust'] = 72.5 + emotion_scores_model['fear'] = 72.5 + emotion_scores_model['joy'] = 72.5 + emotion_scores_model['sadness'] = 72.5 + + feature_sentiment_results_model = {} # FeatureSentimentResults + feature_sentiment_results_model['score'] = 72.5 + + # Construct a json representation of a KeywordsResult model + keywords_result_model_json = {} + keywords_result_model_json['count'] = 38 + keywords_result_model_json['relevance'] = 72.5 + keywords_result_model_json['text'] = 'testString' + keywords_result_model_json['emotion'] = emotion_scores_model + keywords_result_model_json['sentiment'] = feature_sentiment_results_model + + # Construct a model instance of KeywordsResult by calling from_dict on the json representation + keywords_result_model = KeywordsResult.from_dict(keywords_result_model_json) + assert keywords_result_model != False + + # Construct a model instance of KeywordsResult by calling from_dict on the json representation + keywords_result_model_dict = KeywordsResult.from_dict(keywords_result_model_json).__dict__ + keywords_result_model2 = KeywordsResult(**keywords_result_model_dict) + + # Verify the model instances are equivalent + assert keywords_result_model == keywords_result_model2 + + # Convert model instance back to dict and verify no loss of data + keywords_result_model_json2 = keywords_result_model.to_dict() + assert keywords_result_model_json2 == keywords_result_model_json + + +class TestModel_ListModelsResults: + """ + Test Class for ListModelsResults + """ + + def test_list_models_results_serialization(self): + """ + Test serialization/deserialization for ListModelsResults + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + model_model = {} # Model + model_model['status'] = 'starting' + model_model['model_id'] = 'testString' + model_model['language'] = 'testString' + model_model['description'] = 'testString' + model_model['workspace_id'] = 'testString' + model_model['model_version'] = 'testString' + model_model['version'] = 'testString' + model_model['version_description'] = 'testString' + model_model['created'] = '2019-01-01T12:00:00Z' + + # Construct a json representation of a ListModelsResults model + list_models_results_model_json = {} + list_models_results_model_json['models'] = [model_model] + + # Construct a model instance of ListModelsResults by calling from_dict on the json representation + list_models_results_model = ListModelsResults.from_dict(list_models_results_model_json) + assert list_models_results_model != False + + # Construct a model instance of ListModelsResults by calling from_dict on the json representation + list_models_results_model_dict = ListModelsResults.from_dict(list_models_results_model_json).__dict__ + list_models_results_model2 = ListModelsResults(**list_models_results_model_dict) + + # Verify the model instances are equivalent + assert list_models_results_model == list_models_results_model2 + + # Convert model instance back to dict and verify no loss of data + list_models_results_model_json2 = list_models_results_model.to_dict() + assert list_models_results_model_json2 == list_models_results_model_json + + +class TestModel_Model: + """ + Test Class for Model + """ + + def test_model_serialization(self): + """ + Test serialization/deserialization for Model + """ + + # Construct a json representation of a Model model + model_model_json = {} + model_model_json['status'] = 'starting' + model_model_json['model_id'] = 'testString' + model_model_json['language'] = 'testString' + model_model_json['description'] = 'testString' + model_model_json['workspace_id'] = 'testString' + model_model_json['model_version'] = 'testString' + model_model_json['version'] = 'testString' + model_model_json['version_description'] = 'testString' + model_model_json['created'] = '2019-01-01T12:00:00Z' + + # Construct a model instance of Model by calling from_dict on the json representation + model_model = Model.from_dict(model_model_json) + assert model_model != False + + # Construct a model instance of Model by calling from_dict on the json representation + model_model_dict = Model.from_dict(model_model_json).__dict__ + model_model2 = Model(**model_model_dict) + + # Verify the model instances are equivalent + assert model_model == model_model2 + + # Convert model instance back to dict and verify no loss of data + model_model_json2 = model_model.to_dict() + assert model_model_json2 == model_model_json + + +class TestModel_Notice: + """ + Test Class for Notice + """ + + def test_notice_serialization(self): + """ + Test serialization/deserialization for Notice + """ + + # Construct a json representation of a Notice model + notice_model_json = {} + + # Construct a model instance of Notice by calling from_dict on the json representation + notice_model = Notice.from_dict(notice_model_json) + assert notice_model != False + + # Construct a model instance of Notice by calling from_dict on the json representation + notice_model_dict = Notice.from_dict(notice_model_json).__dict__ + notice_model2 = Notice(**notice_model_dict) + + # Verify the model instances are equivalent + assert notice_model == notice_model2 + + # Convert model instance back to dict and verify no loss of data + notice_model_json2 = 
notice_model.to_dict() + assert notice_model_json2 == notice_model_json + + +class TestModel_RelationArgument: + """ + Test Class for RelationArgument + """ + + def test_relation_argument_serialization(self): + """ + Test serialization/deserialization for RelationArgument + """ + + # Construct dict forms of any model objects needed in order to build this model. + + relation_entity_model = {} # RelationEntity + relation_entity_model['text'] = 'testString' + relation_entity_model['type'] = 'testString' + + # Construct a json representation of a RelationArgument model + relation_argument_model_json = {} + relation_argument_model_json['entities'] = [relation_entity_model] + relation_argument_model_json['location'] = [38] + relation_argument_model_json['text'] = 'testString' + + # Construct a model instance of RelationArgument by calling from_dict on the json representation + relation_argument_model = RelationArgument.from_dict(relation_argument_model_json) + assert relation_argument_model != False + + # Construct a model instance of RelationArgument by calling from_dict on the json representation + relation_argument_model_dict = RelationArgument.from_dict(relation_argument_model_json).__dict__ + relation_argument_model2 = RelationArgument(**relation_argument_model_dict) + + # Verify the model instances are equivalent + assert relation_argument_model == relation_argument_model2 + + # Convert model instance back to dict and verify no loss of data + relation_argument_model_json2 = relation_argument_model.to_dict() + assert relation_argument_model_json2 == relation_argument_model_json + + +class TestModel_RelationEntity: + """ + Test Class for RelationEntity + """ + + def test_relation_entity_serialization(self): + """ + Test serialization/deserialization for RelationEntity + """ + + # Construct a json representation of a RelationEntity model + relation_entity_model_json = {} + relation_entity_model_json['text'] = 'testString' + relation_entity_model_json['type'] = 'testString' + + # Construct a model instance of RelationEntity by calling from_dict on the json representation + relation_entity_model = RelationEntity.from_dict(relation_entity_model_json) + assert relation_entity_model != False + + # Construct a model instance of RelationEntity by calling from_dict on the json representation + relation_entity_model_dict = RelationEntity.from_dict(relation_entity_model_json).__dict__ + relation_entity_model2 = RelationEntity(**relation_entity_model_dict) + + # Verify the model instances are equivalent + assert relation_entity_model == relation_entity_model2 + + # Convert model instance back to dict and verify no loss of data + relation_entity_model_json2 = relation_entity_model.to_dict() + assert relation_entity_model_json2 == relation_entity_model_json + + +class TestModel_RelationsOptions: + """ + Test Class for RelationsOptions + """ + + def test_relations_options_serialization(self): + """ + Test serialization/deserialization for RelationsOptions + """ + + # Construct a json representation of a RelationsOptions model + relations_options_model_json = {} + relations_options_model_json['model'] = 'testString' + + # Construct a model instance of RelationsOptions by calling from_dict on the json representation + relations_options_model = RelationsOptions.from_dict(relations_options_model_json) + assert relations_options_model != False + + # Construct a model instance of RelationsOptions by calling from_dict on the json representation + relations_options_model_dict = 
RelationsOptions.from_dict(relations_options_model_json).__dict__ + relations_options_model2 = RelationsOptions(**relations_options_model_dict) + + # Verify the model instances are equivalent + assert relations_options_model == relations_options_model2 + + # Convert model instance back to dict and verify no loss of data + relations_options_model_json2 = relations_options_model.to_dict() + assert relations_options_model_json2 == relations_options_model_json + + +class TestModel_RelationsResult: + """ + Test Class for RelationsResult + """ + + def test_relations_result_serialization(self): + """ + Test serialization/deserialization for RelationsResult + """ + + # Construct dict forms of any model objects needed in order to build this model. + + relation_entity_model = {} # RelationEntity + relation_entity_model['text'] = 'testString' + relation_entity_model['type'] = 'testString' + + relation_argument_model = {} # RelationArgument + relation_argument_model['entities'] = [relation_entity_model] + relation_argument_model['location'] = [38] + relation_argument_model['text'] = 'testString' + + # Construct a json representation of a RelationsResult model + relations_result_model_json = {} + relations_result_model_json['score'] = 72.5 + relations_result_model_json['sentence'] = 'testString' + relations_result_model_json['type'] = 'testString' + relations_result_model_json['arguments'] = [relation_argument_model] + + # Construct a model instance of RelationsResult by calling from_dict on the json representation + relations_result_model = RelationsResult.from_dict(relations_result_model_json) + assert relations_result_model != False + + # Construct a model instance of RelationsResult by calling from_dict on the json representation + relations_result_model_dict = RelationsResult.from_dict(relations_result_model_json).__dict__ + relations_result_model2 = RelationsResult(**relations_result_model_dict) + + # Verify the model instances are equivalent + assert relations_result_model == relations_result_model2 + + # Convert model instance back to dict and verify no loss of data + relations_result_model_json2 = relations_result_model.to_dict() + assert relations_result_model_json2 == relations_result_model_json + + +class TestModel_SemanticRolesEntity: + """ + Test Class for SemanticRolesEntity + """ + + def test_semantic_roles_entity_serialization(self): + """ + Test serialization/deserialization for SemanticRolesEntity + """ + + # Construct a json representation of a SemanticRolesEntity model + semantic_roles_entity_model_json = {} + semantic_roles_entity_model_json['type'] = 'testString' + semantic_roles_entity_model_json['text'] = 'testString' + + # Construct a model instance of SemanticRolesEntity by calling from_dict on the json representation + semantic_roles_entity_model = SemanticRolesEntity.from_dict(semantic_roles_entity_model_json) + assert semantic_roles_entity_model != False + + # Construct a model instance of SemanticRolesEntity by calling from_dict on the json representation + semantic_roles_entity_model_dict = SemanticRolesEntity.from_dict(semantic_roles_entity_model_json).__dict__ + semantic_roles_entity_model2 = SemanticRolesEntity(**semantic_roles_entity_model_dict) + + # Verify the model instances are equivalent + assert semantic_roles_entity_model == semantic_roles_entity_model2 + + # Convert model instance back to dict and verify no loss of data + semantic_roles_entity_model_json2 = semantic_roles_entity_model.to_dict() + assert semantic_roles_entity_model_json2 == 
semantic_roles_entity_model_json + + +class TestModel_SemanticRolesKeyword: + """ + Test Class for SemanticRolesKeyword + """ + + def test_semantic_roles_keyword_serialization(self): + """ + Test serialization/deserialization for SemanticRolesKeyword + """ + + # Construct a json representation of a SemanticRolesKeyword model + semantic_roles_keyword_model_json = {} + semantic_roles_keyword_model_json['text'] = 'testString' + + # Construct a model instance of SemanticRolesKeyword by calling from_dict on the json representation + semantic_roles_keyword_model = SemanticRolesKeyword.from_dict(semantic_roles_keyword_model_json) + assert semantic_roles_keyword_model != False + + # Construct a model instance of SemanticRolesKeyword by calling from_dict on the json representation + semantic_roles_keyword_model_dict = SemanticRolesKeyword.from_dict(semantic_roles_keyword_model_json).__dict__ + semantic_roles_keyword_model2 = SemanticRolesKeyword(**semantic_roles_keyword_model_dict) + + # Verify the model instances are equivalent + assert semantic_roles_keyword_model == semantic_roles_keyword_model2 + + # Convert model instance back to dict and verify no loss of data + semantic_roles_keyword_model_json2 = semantic_roles_keyword_model.to_dict() + assert semantic_roles_keyword_model_json2 == semantic_roles_keyword_model_json + + +class TestModel_SemanticRolesOptions: + """ + Test Class for SemanticRolesOptions + """ + + def test_semantic_roles_options_serialization(self): + """ + Test serialization/deserialization for SemanticRolesOptions + """ + + # Construct a json representation of a SemanticRolesOptions model + semantic_roles_options_model_json = {} + semantic_roles_options_model_json['limit'] = 50 + semantic_roles_options_model_json['keywords'] = False + semantic_roles_options_model_json['entities'] = False + + # Construct a model instance of SemanticRolesOptions by calling from_dict on the json representation + semantic_roles_options_model = SemanticRolesOptions.from_dict(semantic_roles_options_model_json) + assert semantic_roles_options_model != False + + # Construct a model instance of SemanticRolesOptions by calling from_dict on the json representation + semantic_roles_options_model_dict = SemanticRolesOptions.from_dict(semantic_roles_options_model_json).__dict__ + semantic_roles_options_model2 = SemanticRolesOptions(**semantic_roles_options_model_dict) + + # Verify the model instances are equivalent + assert semantic_roles_options_model == semantic_roles_options_model2 + + # Convert model instance back to dict and verify no loss of data + semantic_roles_options_model_json2 = semantic_roles_options_model.to_dict() + assert semantic_roles_options_model_json2 == semantic_roles_options_model_json + + +class TestModel_SemanticRolesResult: + """ + Test Class for SemanticRolesResult + """ + + def test_semantic_roles_result_serialization(self): + """ + Test serialization/deserialization for SemanticRolesResult + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + semantic_roles_entity_model = {} # SemanticRolesEntity + semantic_roles_entity_model['type'] = 'testString' + semantic_roles_entity_model['text'] = 'testString' + + semantic_roles_keyword_model = {} # SemanticRolesKeyword + semantic_roles_keyword_model['text'] = 'testString' + + semantic_roles_result_subject_model = {} # SemanticRolesResultSubject + semantic_roles_result_subject_model['text'] = 'testString' + semantic_roles_result_subject_model['entities'] = [semantic_roles_entity_model] + semantic_roles_result_subject_model['keywords'] = [semantic_roles_keyword_model] + + semantic_roles_verb_model = {} # SemanticRolesVerb + semantic_roles_verb_model['text'] = 'testString' + semantic_roles_verb_model['tense'] = 'testString' + + semantic_roles_result_action_model = {} # SemanticRolesResultAction + semantic_roles_result_action_model['text'] = 'testString' + semantic_roles_result_action_model['normalized'] = 'testString' + semantic_roles_result_action_model['verb'] = semantic_roles_verb_model + + semantic_roles_result_object_model = {} # SemanticRolesResultObject + semantic_roles_result_object_model['text'] = 'testString' + semantic_roles_result_object_model['keywords'] = [semantic_roles_keyword_model] + + # Construct a json representation of a SemanticRolesResult model + semantic_roles_result_model_json = {} + semantic_roles_result_model_json['sentence'] = 'testString' + semantic_roles_result_model_json['subject'] = semantic_roles_result_subject_model + semantic_roles_result_model_json['action'] = semantic_roles_result_action_model + semantic_roles_result_model_json['object'] = semantic_roles_result_object_model + + # Construct a model instance of SemanticRolesResult by calling from_dict on the json representation + semantic_roles_result_model = SemanticRolesResult.from_dict(semantic_roles_result_model_json) + assert semantic_roles_result_model != False + + # Construct a model instance of SemanticRolesResult by calling from_dict on the json representation + semantic_roles_result_model_dict = SemanticRolesResult.from_dict(semantic_roles_result_model_json).__dict__ + semantic_roles_result_model2 = SemanticRolesResult(**semantic_roles_result_model_dict) + + # Verify the model instances are equivalent + assert semantic_roles_result_model == semantic_roles_result_model2 + + # Convert model instance back to dict and verify no loss of data + semantic_roles_result_model_json2 = semantic_roles_result_model.to_dict() + assert semantic_roles_result_model_json2 == semantic_roles_result_model_json + + +class TestModel_SemanticRolesResultAction: + """ + Test Class for SemanticRolesResultAction + """ + + def test_semantic_roles_result_action_serialization(self): + """ + Test serialization/deserialization for SemanticRolesResultAction + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + semantic_roles_verb_model = {} # SemanticRolesVerb + semantic_roles_verb_model['text'] = 'testString' + semantic_roles_verb_model['tense'] = 'testString' + + # Construct a json representation of a SemanticRolesResultAction model + semantic_roles_result_action_model_json = {} + semantic_roles_result_action_model_json['text'] = 'testString' + semantic_roles_result_action_model_json['normalized'] = 'testString' + semantic_roles_result_action_model_json['verb'] = semantic_roles_verb_model + + # Construct a model instance of SemanticRolesResultAction by calling from_dict on the json representation + semantic_roles_result_action_model = SemanticRolesResultAction.from_dict(semantic_roles_result_action_model_json) + assert semantic_roles_result_action_model != False + + # Construct a model instance of SemanticRolesResultAction by calling from_dict on the json representation + semantic_roles_result_action_model_dict = SemanticRolesResultAction.from_dict(semantic_roles_result_action_model_json).__dict__ + semantic_roles_result_action_model2 = SemanticRolesResultAction(**semantic_roles_result_action_model_dict) + + # Verify the model instances are equivalent + assert semantic_roles_result_action_model == semantic_roles_result_action_model2 + + # Convert model instance back to dict and verify no loss of data + semantic_roles_result_action_model_json2 = semantic_roles_result_action_model.to_dict() + assert semantic_roles_result_action_model_json2 == semantic_roles_result_action_model_json + + +class TestModel_SemanticRolesResultObject: + """ + Test Class for SemanticRolesResultObject + """ + + def test_semantic_roles_result_object_serialization(self): + """ + Test serialization/deserialization for SemanticRolesResultObject + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + semantic_roles_keyword_model = {} # SemanticRolesKeyword + semantic_roles_keyword_model['text'] = 'testString' + + # Construct a json representation of a SemanticRolesResultObject model + semantic_roles_result_object_model_json = {} + semantic_roles_result_object_model_json['text'] = 'testString' + semantic_roles_result_object_model_json['keywords'] = [semantic_roles_keyword_model] + + # Construct a model instance of SemanticRolesResultObject by calling from_dict on the json representation + semantic_roles_result_object_model = SemanticRolesResultObject.from_dict(semantic_roles_result_object_model_json) + assert semantic_roles_result_object_model != False + + # Construct a model instance of SemanticRolesResultObject by calling from_dict on the json representation + semantic_roles_result_object_model_dict = SemanticRolesResultObject.from_dict(semantic_roles_result_object_model_json).__dict__ + semantic_roles_result_object_model2 = SemanticRolesResultObject(**semantic_roles_result_object_model_dict) + + # Verify the model instances are equivalent + assert semantic_roles_result_object_model == semantic_roles_result_object_model2 + + # Convert model instance back to dict and verify no loss of data + semantic_roles_result_object_model_json2 = semantic_roles_result_object_model.to_dict() + assert semantic_roles_result_object_model_json2 == semantic_roles_result_object_model_json + + +class TestModel_SemanticRolesResultSubject: + """ + Test Class for SemanticRolesResultSubject + """ + + def test_semantic_roles_result_subject_serialization(self): + """ + Test serialization/deserialization for SemanticRolesResultSubject + """ + + # Construct dict forms of any model objects needed in order to build this model. + + semantic_roles_entity_model = {} # SemanticRolesEntity + semantic_roles_entity_model['type'] = 'testString' + semantic_roles_entity_model['text'] = 'testString' + + semantic_roles_keyword_model = {} # SemanticRolesKeyword + semantic_roles_keyword_model['text'] = 'testString' + + # Construct a json representation of a SemanticRolesResultSubject model + semantic_roles_result_subject_model_json = {} + semantic_roles_result_subject_model_json['text'] = 'testString' + semantic_roles_result_subject_model_json['entities'] = [semantic_roles_entity_model] + semantic_roles_result_subject_model_json['keywords'] = [semantic_roles_keyword_model] + + # Construct a model instance of SemanticRolesResultSubject by calling from_dict on the json representation + semantic_roles_result_subject_model = SemanticRolesResultSubject.from_dict(semantic_roles_result_subject_model_json) + assert semantic_roles_result_subject_model != False + + # Construct a model instance of SemanticRolesResultSubject by calling from_dict on the json representation + semantic_roles_result_subject_model_dict = SemanticRolesResultSubject.from_dict(semantic_roles_result_subject_model_json).__dict__ + semantic_roles_result_subject_model2 = SemanticRolesResultSubject(**semantic_roles_result_subject_model_dict) + + # Verify the model instances are equivalent + assert semantic_roles_result_subject_model == semantic_roles_result_subject_model2 + + # Convert model instance back to dict and verify no loss of data + semantic_roles_result_subject_model_json2 = semantic_roles_result_subject_model.to_dict() + assert semantic_roles_result_subject_model_json2 == semantic_roles_result_subject_model_json + + +class TestModel_SemanticRolesVerb: + """ + Test Class for SemanticRolesVerb + """ + + def test_semantic_roles_verb_serialization(self): + """ 
+ Test serialization/deserialization for SemanticRolesVerb + """ + + # Construct a json representation of a SemanticRolesVerb model + semantic_roles_verb_model_json = {} + semantic_roles_verb_model_json['text'] = 'testString' + semantic_roles_verb_model_json['tense'] = 'testString' + + # Construct a model instance of SemanticRolesVerb by calling from_dict on the json representation + semantic_roles_verb_model = SemanticRolesVerb.from_dict(semantic_roles_verb_model_json) + assert semantic_roles_verb_model != False + + # Construct a model instance of SemanticRolesVerb by calling from_dict on the json representation + semantic_roles_verb_model_dict = SemanticRolesVerb.from_dict(semantic_roles_verb_model_json).__dict__ + semantic_roles_verb_model2 = SemanticRolesVerb(**semantic_roles_verb_model_dict) + + # Verify the model instances are equivalent + assert semantic_roles_verb_model == semantic_roles_verb_model2 + + # Convert model instance back to dict and verify no loss of data + semantic_roles_verb_model_json2 = semantic_roles_verb_model.to_dict() + assert semantic_roles_verb_model_json2 == semantic_roles_verb_model_json + + +class TestModel_SentenceResult: + """ + Test Class for SentenceResult + """ + + def test_sentence_result_serialization(self): + """ + Test serialization/deserialization for SentenceResult + """ + + # Construct a json representation of a SentenceResult model + sentence_result_model_json = {} + sentence_result_model_json['text'] = 'testString' + sentence_result_model_json['location'] = [38] + + # Construct a model instance of SentenceResult by calling from_dict on the json representation + sentence_result_model = SentenceResult.from_dict(sentence_result_model_json) + assert sentence_result_model != False + + # Construct a model instance of SentenceResult by calling from_dict on the json representation + sentence_result_model_dict = SentenceResult.from_dict(sentence_result_model_json).__dict__ + sentence_result_model2 = SentenceResult(**sentence_result_model_dict) + + # Verify the model instances are equivalent + assert sentence_result_model == sentence_result_model2 + + # Convert model instance back to dict and verify no loss of data + sentence_result_model_json2 = sentence_result_model.to_dict() + assert sentence_result_model_json2 == sentence_result_model_json + + +class TestModel_SentimentOptions: + """ + Test Class for SentimentOptions + """ + + def test_sentiment_options_serialization(self): + """ + Test serialization/deserialization for SentimentOptions + """ + + # Construct a json representation of a SentimentOptions model + sentiment_options_model_json = {} + sentiment_options_model_json['document'] = True + sentiment_options_model_json['targets'] = ['testString'] + + # Construct a model instance of SentimentOptions by calling from_dict on the json representation + sentiment_options_model = SentimentOptions.from_dict(sentiment_options_model_json) + assert sentiment_options_model != False + + # Construct a model instance of SentimentOptions by calling from_dict on the json representation + sentiment_options_model_dict = SentimentOptions.from_dict(sentiment_options_model_json).__dict__ + sentiment_options_model2 = SentimentOptions(**sentiment_options_model_dict) + + # Verify the model instances are equivalent + assert sentiment_options_model == sentiment_options_model2 + + # Convert model instance back to dict and verify no loss of data + sentiment_options_model_json2 = sentiment_options_model.to_dict() + assert sentiment_options_model_json2 == 
sentiment_options_model_json + + +class TestModel_SentimentResult: + """ + Test Class for SentimentResult + """ + + def test_sentiment_result_serialization(self): + """ + Test serialization/deserialization for SentimentResult + """ + + # Construct dict forms of any model objects needed in order to build this model. + + document_sentiment_results_model = {} # DocumentSentimentResults + document_sentiment_results_model['label'] = 'positive' + document_sentiment_results_model['score'] = 0.127034 + + targeted_sentiment_results_model = {} # TargetedSentimentResults + targeted_sentiment_results_model['text'] = 'stocks' + targeted_sentiment_results_model['score'] = 0.279964 + + # Construct a json representation of a SentimentResult model + sentiment_result_model_json = {} + sentiment_result_model_json['document'] = document_sentiment_results_model + sentiment_result_model_json['targets'] = [targeted_sentiment_results_model] + + # Construct a model instance of SentimentResult by calling from_dict on the json representation + sentiment_result_model = SentimentResult.from_dict(sentiment_result_model_json) + assert sentiment_result_model != False + + # Construct a model instance of SentimentResult by calling from_dict on the json representation + sentiment_result_model_dict = SentimentResult.from_dict(sentiment_result_model_json).__dict__ + sentiment_result_model2 = SentimentResult(**sentiment_result_model_dict) + + # Verify the model instances are equivalent + assert sentiment_result_model == sentiment_result_model2 + + # Convert model instance back to dict and verify no loss of data + sentiment_result_model_json2 = sentiment_result_model.to_dict() + assert sentiment_result_model_json2 == sentiment_result_model_json + + +class TestModel_SyntaxOptions: + """ + Test Class for SyntaxOptions + """ + + def test_syntax_options_serialization(self): + """ + Test serialization/deserialization for SyntaxOptions + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + syntax_options_tokens_model = {} # SyntaxOptionsTokens + syntax_options_tokens_model['lemma'] = True + syntax_options_tokens_model['part_of_speech'] = True + + # Construct a json representation of a SyntaxOptions model + syntax_options_model_json = {} + syntax_options_model_json['tokens'] = syntax_options_tokens_model + syntax_options_model_json['sentences'] = True + + # Construct a model instance of SyntaxOptions by calling from_dict on the json representation + syntax_options_model = SyntaxOptions.from_dict(syntax_options_model_json) + assert syntax_options_model != False + + # Construct a model instance of SyntaxOptions by calling from_dict on the json representation + syntax_options_model_dict = SyntaxOptions.from_dict(syntax_options_model_json).__dict__ + syntax_options_model2 = SyntaxOptions(**syntax_options_model_dict) + + # Verify the model instances are equivalent + assert syntax_options_model == syntax_options_model2 + + # Convert model instance back to dict and verify no loss of data + syntax_options_model_json2 = syntax_options_model.to_dict() + assert syntax_options_model_json2 == syntax_options_model_json + + +class TestModel_SyntaxOptionsTokens: + """ + Test Class for SyntaxOptionsTokens + """ + + def test_syntax_options_tokens_serialization(self): + """ + Test serialization/deserialization for SyntaxOptionsTokens + """ + + # Construct a json representation of a SyntaxOptionsTokens model + syntax_options_tokens_model_json = {} + syntax_options_tokens_model_json['lemma'] = True + syntax_options_tokens_model_json['part_of_speech'] = True + + # Construct a model instance of SyntaxOptionsTokens by calling from_dict on the json representation + syntax_options_tokens_model = SyntaxOptionsTokens.from_dict(syntax_options_tokens_model_json) + assert syntax_options_tokens_model != False + + # Construct a model instance of SyntaxOptionsTokens by calling from_dict on the json representation + syntax_options_tokens_model_dict = SyntaxOptionsTokens.from_dict(syntax_options_tokens_model_json).__dict__ + syntax_options_tokens_model2 = SyntaxOptionsTokens(**syntax_options_tokens_model_dict) + + # Verify the model instances are equivalent + assert syntax_options_tokens_model == syntax_options_tokens_model2 + + # Convert model instance back to dict and verify no loss of data + syntax_options_tokens_model_json2 = syntax_options_tokens_model.to_dict() + assert syntax_options_tokens_model_json2 == syntax_options_tokens_model_json + + +class TestModel_SyntaxResult: + """ + Test Class for SyntaxResult + """ + + def test_syntax_result_serialization(self): + """ + Test serialization/deserialization for SyntaxResult + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + token_result_model = {} # TokenResult + token_result_model['text'] = 'testString' + token_result_model['part_of_speech'] = 'ADJ' + token_result_model['location'] = [38] + token_result_model['lemma'] = 'testString' + + sentence_result_model = {} # SentenceResult + sentence_result_model['text'] = 'testString' + sentence_result_model['location'] = [38] + + # Construct a json representation of a SyntaxResult model + syntax_result_model_json = {} + syntax_result_model_json['tokens'] = [token_result_model] + syntax_result_model_json['sentences'] = [sentence_result_model] + + # Construct a model instance of SyntaxResult by calling from_dict on the json representation + syntax_result_model = SyntaxResult.from_dict(syntax_result_model_json) + assert syntax_result_model != False + + # Construct a model instance of SyntaxResult by calling from_dict on the json representation + syntax_result_model_dict = SyntaxResult.from_dict(syntax_result_model_json).__dict__ + syntax_result_model2 = SyntaxResult(**syntax_result_model_dict) + + # Verify the model instances are equivalent + assert syntax_result_model == syntax_result_model2 + + # Convert model instance back to dict and verify no loss of data + syntax_result_model_json2 = syntax_result_model.to_dict() + assert syntax_result_model_json2 == syntax_result_model_json + + +class TestModel_TargetedEmotionResults: + """ + Test Class for TargetedEmotionResults + """ + + def test_targeted_emotion_results_serialization(self): + """ + Test serialization/deserialization for TargetedEmotionResults + """ + + # Construct dict forms of any model objects needed in order to build this model. + + emotion_scores_model = {} # EmotionScores + emotion_scores_model['anger'] = 72.5 + emotion_scores_model['disgust'] = 72.5 + emotion_scores_model['fear'] = 72.5 + emotion_scores_model['joy'] = 72.5 + emotion_scores_model['sadness'] = 72.5 + + # Construct a json representation of a TargetedEmotionResults model + targeted_emotion_results_model_json = {} + targeted_emotion_results_model_json['text'] = 'testString' + targeted_emotion_results_model_json['emotion'] = emotion_scores_model + + # Construct a model instance of TargetedEmotionResults by calling from_dict on the json representation + targeted_emotion_results_model = TargetedEmotionResults.from_dict(targeted_emotion_results_model_json) + assert targeted_emotion_results_model != False + + # Construct a model instance of TargetedEmotionResults by calling from_dict on the json representation + targeted_emotion_results_model_dict = TargetedEmotionResults.from_dict(targeted_emotion_results_model_json).__dict__ + targeted_emotion_results_model2 = TargetedEmotionResults(**targeted_emotion_results_model_dict) + + # Verify the model instances are equivalent + assert targeted_emotion_results_model == targeted_emotion_results_model2 + + # Convert model instance back to dict and verify no loss of data + targeted_emotion_results_model_json2 = targeted_emotion_results_model.to_dict() + assert targeted_emotion_results_model_json2 == targeted_emotion_results_model_json + + +class TestModel_TargetedSentimentResults: + """ + Test Class for TargetedSentimentResults + """ + + def test_targeted_sentiment_results_serialization(self): + """ + Test serialization/deserialization for TargetedSentimentResults + """ + + # Construct a json representation of a TargetedSentimentResults model + targeted_sentiment_results_model_json = {} + targeted_sentiment_results_model_json['text'] = 'testString' + targeted_sentiment_results_model_json['score'] = 
72.5 + + # Construct a model instance of TargetedSentimentResults by calling from_dict on the json representation + targeted_sentiment_results_model = TargetedSentimentResults.from_dict(targeted_sentiment_results_model_json) + assert targeted_sentiment_results_model != False + + # Construct a model instance of TargetedSentimentResults by calling from_dict on the json representation + targeted_sentiment_results_model_dict = TargetedSentimentResults.from_dict(targeted_sentiment_results_model_json).__dict__ + targeted_sentiment_results_model2 = TargetedSentimentResults(**targeted_sentiment_results_model_dict) + + # Verify the model instances are equivalent + assert targeted_sentiment_results_model == targeted_sentiment_results_model2 + + # Convert model instance back to dict and verify no loss of data + targeted_sentiment_results_model_json2 = targeted_sentiment_results_model.to_dict() + assert targeted_sentiment_results_model_json2 == targeted_sentiment_results_model_json + + +class TestModel_TokenResult: + """ + Test Class for TokenResult + """ + + def test_token_result_serialization(self): + """ + Test serialization/deserialization for TokenResult + """ + + # Construct a json representation of a TokenResult model + token_result_model_json = {} + token_result_model_json['text'] = 'testString' + token_result_model_json['part_of_speech'] = 'ADJ' + token_result_model_json['location'] = [38] + token_result_model_json['lemma'] = 'testString' + + # Construct a model instance of TokenResult by calling from_dict on the json representation + token_result_model = TokenResult.from_dict(token_result_model_json) + assert token_result_model != False + + # Construct a model instance of TokenResult by calling from_dict on the json representation + token_result_model_dict = TokenResult.from_dict(token_result_model_json).__dict__ + token_result_model2 = TokenResult(**token_result_model_dict) + + # Verify the model instances are equivalent + assert token_result_model == token_result_model2 + + # Convert model instance back to dict and verify no loss of data + token_result_model_json2 = token_result_model.to_dict() + assert token_result_model_json2 == token_result_model_json + + +# endregion +############################################################################## +# End of Model Tests +############################################################################## diff --git a/test/unit/test_personality_insights_v3.py b/test/unit/test_personality_insights_v3.py deleted file mode 100755 index 548e06486..000000000 --- a/test/unit/test_personality_insights_v3.py +++ /dev/null @@ -1,117 +0,0 @@ -# coding: utf-8 -import responses -import ibm_watson -import os -import codecs -from ibm_watson.personality_insights_v3 import Profile - -profile_url = 'https://gateway.watsonplatform.net/personality-insights/api/v3/profile' - -@responses.activate -def test_plain_to_json(): - - personality_insights = ibm_watson.PersonalityInsightsV3( - '2016-10-20', username="username", password="password") - - with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect1.txt')) as expect_file: - profile_response = expect_file.read() - - responses.add(responses.POST, profile_url, - body=profile_response, status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3.txt')) as personality_text: - response = personality_insights.profile( - personality_text, 'application/json', content_type='text/plain;charset=utf-8').get_result() - - 
assert 'version=2016-10-20' in responses.calls[0].request.url - assert responses.calls[0].response.text == profile_response - assert len(responses.calls) == 1 - # Verify that response can be converted to a Profile - Profile._from_dict(response) - -@responses.activate -def test_json_to_json(): - - personality_insights = ibm_watson.PersonalityInsightsV3( - '2016-10-20', username="username", password="password") - - with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect2.txt')) as expect_file: - profile_response = expect_file.read() - - responses.add(responses.POST, profile_url, - body=profile_response, status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3.json')) as personality_text: - response = personality_insights.profile( - personality_text, accept='application/json', - content_type='application/json', - raw_scores=True, - consumption_preferences=True).get_result() - - assert 'version=2016-10-20' in responses.calls[0].request.url - assert 'raw_scores=true' in responses.calls[0].request.url - assert 'consumption_preferences=true' in responses.calls[0].request.url - assert responses.calls[0].response.text == profile_response - assert len(responses.calls) == 1 - # Verify that response can be converted to a Profile - Profile._from_dict(response) - -@responses.activate -def test_json_to_csv(): - - personality_insights = ibm_watson.PersonalityInsightsV3( - '2016-10-20', username="username", password="password") - - with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect3.txt')) as expect_file: - profile_response = expect_file.read() - - responses.add(responses.POST, profile_url, - body=profile_response, status=200, - content_type='text/csv') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3.json')) as personality_text: - personality_insights.profile( - personality_text, - 'text/csv', - content_type='application/json', - csv_headers=True, - raw_scores=True, - consumption_preferences=True) - - assert 'version=2016-10-20' in responses.calls[0].request.url - assert 'raw_scores=true' in responses.calls[0].request.url - assert 'consumption_preferences=true' in responses.calls[0].request.url - assert 'csv_headers=true' in responses.calls[0].request.url - assert responses.calls[0].response.text == profile_response - assert len(responses.calls) == 1 - - -@responses.activate -def test_plain_to_json_es(): - - personality_insights = ibm_watson.PersonalityInsightsV3( - '2016-10-20', username="username", password="password") - - with codecs.open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect4.txt'), \ - encoding='utf-8') as expect_file: - profile_response = expect_file.read() - - responses.add(responses.POST, profile_url, - body=profile_response, status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-es.txt')) as personality_text: - response = personality_insights.profile( - personality_text, - 'application/json', - content_type='text/plain;charset=utf-8', - content_language='es', - accept_language='es').get_result() - - assert 'version=2016-10-20' in responses.calls[0].request.url - assert responses.calls[0].response.text == profile_response - assert len(responses.calls) == 1 - # Verify that response can be converted to a Profile - Profile._from_dict(response) diff --git a/test/unit/test_speech_to_text_v1.py 
b/test/unit/test_speech_to_text_v1.py old mode 100755 new mode 100644 index 87a5514c9..cb5babf87 --- a/test/unit/test_speech_to_text_v1.py +++ b/test/unit/test_speech_to_text_v1.py @@ -1,590 +1,6112 @@ -# coding=utf-8 -import os +# -*- coding: utf-8 -*- +# (C) Copyright IBM Corp. 2026. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Unit Tests for SpeechToTextV1 +""" + +from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator +import inspect +import io import json +import pytest +import re +import requests import responses -import ibm_watson -from ibm_watson.speech_to_text_v1 import CustomWord - - -@responses.activate -def test_success(): - models_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/models' - models_response = '{"models": [{"url": "https://stream.watsonplatform.net/speech-to-text/api/v1/models/' \ - 'WatsonModel", "rate": 16000, "name": "WatsonModel", "language": "en-US", "description": ' \ - '"Watson model \'v7w_134k.3\' for Attila 2-5 reco engine."}]}' - - responses.add( - responses.GET, - models_url, - body=models_response, - status=200, - content_type='application/json') - - speech_to_text = ibm_watson.SpeechToTextV1( - username="username", password="password") - speech_to_text.list_models() - - assert responses.calls[0].request.url == models_url - assert responses.calls[0].response.text == models_response - - recognize_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/recognize' - recognize_response = '{"results":[{"alternatives":[{"transcript":"thunderstorms could produce large hail ' \ - 'isolated tornadoes and heavy rain "}],"final":true}],"result_index":0}' - - responses.add( - responses.POST, - recognize_url, - body=recognize_response, - status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: - speech_to_text.recognize( - audio=audio_file, content_type='audio/l16; rate=44100') - - request_url = responses.calls[1].request.url - assert request_url == recognize_url - assert responses.calls[1].response.text == recognize_response - - with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: - speech_to_text.recognize( - audio=audio_file, customization_id='x', content_type='audio/l16; rate=44100') - expected_url = "{0}?customization_id=x".format(recognize_url) - assert expected_url == responses.calls[2].request.url - assert len(responses.calls) == 3 - - -@responses.activate -def test_get_model(): - model_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/models/modelid' - responses.add( - responses.GET, - model_url, - body='{"bogus_response": "yep"}', - status=200, - content_type='application/json') - speech_to_text = ibm_watson.SpeechToTextV1( - username="username", password="password") - speech_to_text.get_model(model_id='modelid') - assert len(responses.calls) == 1 - - -def _decode_body(body): - try: - return body.decode('utf-8') - except: - return body - - 
-@responses.activate -def test_recognitions(): - url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/recognitions' - get_response = '{"recognitions": [{"created": "2018-02-01T17:43:15.432Z","id": "6193190c-0777-11e8-9b4b-43ad845196dd","updated": "2018-02-01T17:43:17.998Z","status": "failed"}]}' - responses.add( - responses.GET, - url, - body=get_response, - status=200, - content_type='application/json') - - responses.add( - responses.POST, - url, - body='{"status": "waiting"}', - status=200, - content_type='application/json') - - responses.add( - responses.DELETE, - "{0}/jobid".format(url), - body='{"description": "deleted successfully"}', - status=200, - content_type='application/json') - - responses.add( - responses.GET, - "{0}/jobid".format(url), - body='{"status": "waiting"}', - status=200, - content_type='application/json') - - speech_to_text = ibm_watson.SpeechToTextV1( - username="username", password="password") - - speech_to_text.check_jobs() - assert responses.calls[0].response.json()['recognitions'][0][ - 'id'] == '6193190c-0777-11e8-9b4b-43ad845196dd' - - speech_to_text.check_job('jobid') - assert responses.calls[1].response.json() == {'status': 'waiting'} - - with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: - speech_to_text.create_job(audio=audio_file, content_type='audio/basic') - assert responses.calls[2].response.json() == {'status': 'waiting'} - - speech_to_text.delete_job('jobid') - assert responses.calls[3].response.json() == { - "description": "deleted successfully" - } - - assert len(responses.calls) == 4 - - -@responses.activate -def test_callbacks(): - base_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1' - responses.add( - responses.POST, - "{0}/register_callback".format(base_url), - body='{"status": "created", "url": "monitorcalls.com"}', - status=200, - content_type='application/json') - - responses.add( - responses.POST, - "{0}/unregister_callback".format(base_url), - body='{"response": "The callback URL was successfully unregistered"}', - status=200, - content_type='application/json') - - speech_to_text = ibm_watson.SpeechToTextV1( - username="username", password="password") - speech_to_text.register_callback("monitorcalls.com") - assert responses.calls[0].response.json() == { - "status": "created", - "url": "monitorcalls.com" - } - - speech_to_text.unregister_callback("monitorcalls.com") - assert responses.calls[1].response.json() == { - "response": "The callback URL was successfully unregistered" - } - - assert len(responses.calls) == 2 - - -@responses.activate -def test_custom_model(): - customization_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations' - train_url = "{0}/{1}/train".format(customization_url, 'customid') - - responses.add( - responses.GET, - customization_url, - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.POST, - customization_url, - body='{"bogus_response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.GET, - "{0}/modelid".format(customization_url), - body='{"bogus_response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.DELETE, - "{0}/modelid".format(customization_url), - body='{"bogus_response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.POST, - train_url, - body='{"bogus_response": "yep"}', - status=200, - content_type='application/json') - 
- speech_to_text = ibm_watson.SpeechToTextV1( - username="username", password="password") - - speech_to_text.list_language_models() - - speech_to_text.create_language_model( - name="Example model", - base_model_name="en-US_BroadbandModel") - - parsed_body = json.loads(_decode_body(responses.calls[1].request.body)) - assert parsed_body['name'] == 'Example model' - - speech_to_text.create_language_model( - name="Example model Two", - base_model_name="en-US_BroadbandModel") - - parsed_body = json.loads(_decode_body(responses.calls[2].request.body)) - assert parsed_body['name'] == 'Example model Two' - assert parsed_body['base_model_name'] == 'en-US_BroadbandModel' - - speech_to_text.train_language_model('customid') - speech_to_text.get_language_model(customization_id='modelid') - speech_to_text.delete_language_model(customization_id='modelid') - - assert len(responses.calls) == 6 - - -@responses.activate -def test_acoustic_model(): - acoustic_customization_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/acoustic_customizations' - train_url = "{0}/{1}/train".format(acoustic_customization_url, 'customid') - - responses.add( - responses.GET, - acoustic_customization_url, - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.POST, - acoustic_customization_url, - body='{"bogus_response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.GET, - "{0}/modelid".format(acoustic_customization_url), - body='{"bogus_response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.DELETE, - "{0}/modelid".format(acoustic_customization_url), - body='{"bogus_response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.POST, - train_url, - body='{"bogus_response": "yep"}', - status=200, - content_type='application/json') - - speech_to_text = ibm_watson.SpeechToTextV1( - username="username", password="password") - - speech_to_text.list_acoustic_models() - - speech_to_text.create_acoustic_model( - name="Example model", - base_model_name="en-US_BroadbandModel", - description="Example custom language model") - - parsed_body = json.loads(_decode_body(responses.calls[1].request.body)) - assert parsed_body['name'] == 'Example model' - - speech_to_text.create_acoustic_model( - name="Example model Two", - base_model_name="en-US_BroadbandModel") - - parsed_body = json.loads(_decode_body(responses.calls[2].request.body)) - assert parsed_body['name'] == 'Example model Two' - assert parsed_body['base_model_name'] == 'en-US_BroadbandModel' - - speech_to_text.train_acoustic_model('customid') - speech_to_text.get_acoustic_model(customization_id='modelid') - speech_to_text.delete_acoustic_model(customization_id='modelid') +import tempfile +import urllib +from ibm_watson.speech_to_text_v1 import * + + +_service = SpeechToTextV1( + authenticator=NoAuthAuthenticator() +) + +_base_url = 'https://api.us-south.speech-to-text.watson.cloud.ibm.com' +_service.set_service_url(_base_url) + + +def preprocess_url(operation_path: str): + """ + Returns the request url associated with the specified operation path. + This will be base_url concatenated with a quoted version of operation_path. + The returned request URL is used to register the mock response so it needs + to match the request URL that is formed by the requests library. + """ + + # Form the request URL from the base URL and operation path. 
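+ # For example, preprocess_url('/v1/models') yields 'https://api.us-south.speech-to-text.watson.cloud.ibm.com/v1/models';
+ # a path that ends in '/' is instead returned as a compiled pattern so that one or more trailing slashes still match.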
+ request_url = _base_url + operation_path + + # If the request url does NOT end with a /, then just return it as-is. + # Otherwise, return a regular expression that matches one or more trailing /. + if not request_url.endswith('/'): + return request_url + return re.compile(request_url.rstrip('/') + '/+') + + +############################################################################## +# Start of Service: Models +############################################################################## +# region + + +class TestListModels: + """ + Test Class for list_models + """ + + @responses.activate + def test_list_models_all_params(self): + """ + list_models() + """ + # Set up mock + url = preprocess_url('/v1/models') + mock_response = '{"models": [{"name": "name", "language": "language", "rate": 4, "url": "url", "supported_features": {"custom_language_model": false, "custom_acoustic_model": false, "speaker_labels": true, "low_latency": false}, "description": "description"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_models() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_models_all_params_with_retries(self): + # Enable retries and run test_list_models_all_params. + _service.enable_retries() + self.test_list_models_all_params() + + # Disable retries and run test_list_models_all_params. + _service.disable_retries() + self.test_list_models_all_params() + + +class TestGetModel: + """ + Test Class for get_model + """ + + @responses.activate + def test_get_model_all_params(self): + """ + get_model() + """ + # Set up mock + url = preprocess_url('/v1/models/ar-MS_BroadbandModel') + mock_response = '{"name": "name", "language": "language", "rate": 4, "url": "url", "supported_features": {"custom_language_model": false, "custom_acoustic_model": false, "speaker_labels": true, "low_latency": false}, "description": "description"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'ar-MS_BroadbandModel' + + # Invoke method + response = _service.get_model( + model_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_model_all_params_with_retries(self): + # Enable retries and run test_get_model_all_params. + _service.enable_retries() + self.test_get_model_all_params() + + # Disable retries and run test_get_model_all_params. 
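+ # (each *_with_retries helper in this file simply re-runs its base test once with the SDK's automatic retries enabled and once with them disabled)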
+ _service.disable_retries() + self.test_get_model_all_params() + + @responses.activate + def test_get_model_value_error(self): + """ + test_get_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/models/ar-MS_BroadbandModel') + mock_response = '{"name": "name", "language": "language", "rate": 4, "url": "url", "supported_features": {"custom_language_model": false, "custom_acoustic_model": false, "speaker_labels": true, "low_latency": false}, "description": "description"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + model_id = 'ar-MS_BroadbandModel' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "model_id": model_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_model(**req_copy) + + def test_get_model_value_error_with_retries(self): + # Enable retries and run test_get_model_value_error. + _service.enable_retries() + self.test_get_model_value_error() + + # Disable retries and run test_get_model_value_error. + _service.disable_retries() + self.test_get_model_value_error() + + +# endregion +############################################################################## +# End of Service: Models +############################################################################## + +############################################################################## +# Start of Service: Synchronous +############################################################################## +# region + + +class TestRecognize: + """ + Test Class for recognize + """ + + @responses.activate + def test_recognize_all_params(self): + """ + recognize() + """ + # Set up mock + url = preprocess_url('/v1/recognize') + mock_response = '{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + audio = io.BytesIO(b'This is a mock file.').getvalue() + content_type = 'application/octet-stream' + model = 'en-US_BroadbandModel' + speech_begin_event = False + enrichments = 'testString' + 
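+ # (the assignments below supply the optional recognize() parameters, most of which are checked against the mock request's query string further down)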
language_customization_id = 'testString' + acoustic_customization_id = 'testString' + base_model_version = 'testString' + customization_weight = 72.5 + inactivity_timeout = 30 + keywords = ['testString'] + keywords_threshold = 36.0 + max_alternatives = 1 + word_alternatives_threshold = 36.0 + word_confidence = False + timestamps = False + profanity_filter = True + smart_formatting = False + smart_formatting_version = 0 + speaker_labels = False + grammar_name = 'testString' + redaction = False + audio_metrics = False + end_of_phrase_silence_time = 0.8 + split_transcript_at_phrase_end = False + speech_detector_sensitivity = 0.5 + sad_module = 1 + background_audio_suppression = 0.0 + low_latency = False + character_insertion_bias = 0.0 + + # Invoke method + response = _service.recognize( + audio, + content_type=content_type, + model=model, + speech_begin_event=speech_begin_event, + enrichments=enrichments, + language_customization_id=language_customization_id, + acoustic_customization_id=acoustic_customization_id, + base_model_version=base_model_version, + customization_weight=customization_weight, + inactivity_timeout=inactivity_timeout, + keywords=keywords, + keywords_threshold=keywords_threshold, + max_alternatives=max_alternatives, + word_alternatives_threshold=word_alternatives_threshold, + word_confidence=word_confidence, + timestamps=timestamps, + profanity_filter=profanity_filter, + smart_formatting=smart_formatting, + smart_formatting_version=smart_formatting_version, + speaker_labels=speaker_labels, + grammar_name=grammar_name, + redaction=redaction, + audio_metrics=audio_metrics, + end_of_phrase_silence_time=end_of_phrase_silence_time, + split_transcript_at_phrase_end=split_transcript_at_phrase_end, + speech_detector_sensitivity=speech_detector_sensitivity, + sad_module=sad_module, + background_audio_suppression=background_audio_suppression, + low_latency=low_latency, + character_insertion_bias=character_insertion_bias, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'model={}'.format(model) in query_string + assert 'speech_begin_event={}'.format('true' if speech_begin_event else 'false') in query_string + assert 'enrichments={}'.format(enrichments) in query_string + assert 'language_customization_id={}'.format(language_customization_id) in query_string + assert 'acoustic_customization_id={}'.format(acoustic_customization_id) in query_string + assert 'base_model_version={}'.format(base_model_version) in query_string + assert 'customization_weight={}'.format(customization_weight) in query_string + assert 'inactivity_timeout={}'.format(inactivity_timeout) in query_string + assert 'keywords={}'.format(','.join(keywords)) in query_string + assert 'max_alternatives={}'.format(max_alternatives) in query_string + assert 'word_confidence={}'.format('true' if word_confidence else 'false') in query_string + assert 'timestamps={}'.format('true' if timestamps else 'false') in query_string + assert 'profanity_filter={}'.format('true' if profanity_filter else 'false') in query_string + assert 'smart_formatting={}'.format('true' if smart_formatting else 'false') in query_string + assert 'smart_formatting_version={}'.format(smart_formatting_version) in query_string + assert 'speaker_labels={}'.format('true' if speaker_labels else 'false') in query_string + assert 
'grammar_name={}'.format(grammar_name) in query_string + assert 'redaction={}'.format('true' if redaction else 'false') in query_string + assert 'audio_metrics={}'.format('true' if audio_metrics else 'false') in query_string + assert 'end_of_phrase_silence_time={}'.format(end_of_phrase_silence_time) in query_string + assert 'split_transcript_at_phrase_end={}'.format('true' if split_transcript_at_phrase_end else 'false') in query_string + assert 'sad_module={}'.format(sad_module) in query_string + assert 'low_latency={}'.format('true' if low_latency else 'false') in query_string + # Validate body params + + def test_recognize_all_params_with_retries(self): + # Enable retries and run test_recognize_all_params. + _service.enable_retries() + self.test_recognize_all_params() + + # Disable retries and run test_recognize_all_params. + _service.disable_retries() + self.test_recognize_all_params() + + @responses.activate + def test_recognize_required_params(self): + """ + test_recognize_required_params() + """ + # Set up mock + url = preprocess_url('/v1/recognize') + mock_response = '{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + audio = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.recognize( + audio, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + + def test_recognize_required_params_with_retries(self): + # Enable retries and run test_recognize_required_params. + _service.enable_retries() + self.test_recognize_required_params() + + # Disable retries and run test_recognize_required_params. 
+ _service.disable_retries() + self.test_recognize_required_params() + + @responses.activate + def test_recognize_value_error(self): + """ + test_recognize_value_error() + """ + # Set up mock + url = preprocess_url('/v1/recognize') + mock_response = '{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + audio = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "audio": audio, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.recognize(**req_copy) + + def test_recognize_value_error_with_retries(self): + # Enable retries and run test_recognize_value_error. + _service.enable_retries() + self.test_recognize_value_error() + + # Disable retries and run test_recognize_value_error. 
+ _service.disable_retries() + self.test_recognize_value_error() + + +# endregion +############################################################################## +# End of Service: Synchronous +############################################################################## + +############################################################################## +# Start of Service: Asynchronous +############################################################################## +# region + + +class TestRegisterCallback: + """ + Test Class for register_callback + """ + + @responses.activate + def test_register_callback_all_params(self): + """ + register_callback() + """ + # Set up mock + url = preprocess_url('/v1/register_callback') + mock_response = '{"status": "created", "url": "url"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + callback_url = 'testString' + user_secret = 'testString' + + # Invoke method + response = _service.register_callback( + callback_url, + user_secret=user_secret, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'callback_url={}'.format(callback_url) in query_string + assert 'user_secret={}'.format(user_secret) in query_string + + def test_register_callback_all_params_with_retries(self): + # Enable retries and run test_register_callback_all_params. + _service.enable_retries() + self.test_register_callback_all_params() + + # Disable retries and run test_register_callback_all_params. + _service.disable_retries() + self.test_register_callback_all_params() + + @responses.activate + def test_register_callback_required_params(self): + """ + test_register_callback_required_params() + """ + # Set up mock + url = preprocess_url('/v1/register_callback') + mock_response = '{"status": "created", "url": "url"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + callback_url = 'testString' + + # Invoke method + response = _service.register_callback( + callback_url, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'callback_url={}'.format(callback_url) in query_string + + def test_register_callback_required_params_with_retries(self): + # Enable retries and run test_register_callback_required_params. + _service.enable_retries() + self.test_register_callback_required_params() + + # Disable retries and run test_register_callback_required_params. 
+ _service.disable_retries() + self.test_register_callback_required_params() + + @responses.activate + def test_register_callback_value_error(self): + """ + test_register_callback_value_error() + """ + # Set up mock + url = preprocess_url('/v1/register_callback') + mock_response = '{"status": "created", "url": "url"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + callback_url = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "callback_url": callback_url, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.register_callback(**req_copy) + + def test_register_callback_value_error_with_retries(self): + # Enable retries and run test_register_callback_value_error. + _service.enable_retries() + self.test_register_callback_value_error() + + # Disable retries and run test_register_callback_value_error. + _service.disable_retries() + self.test_register_callback_value_error() + + +class TestUnregisterCallback: + """ + Test Class for unregister_callback + """ + + @responses.activate + def test_unregister_callback_all_params(self): + """ + unregister_callback() + """ + # Set up mock + url = preprocess_url('/v1/unregister_callback') + responses.add( + responses.POST, + url, + status=200, + ) + + # Set up parameter values + callback_url = 'testString' + + # Invoke method + response = _service.unregister_callback( + callback_url, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'callback_url={}'.format(callback_url) in query_string + + def test_unregister_callback_all_params_with_retries(self): + # Enable retries and run test_unregister_callback_all_params. + _service.enable_retries() + self.test_unregister_callback_all_params() + + # Disable retries and run test_unregister_callback_all_params. + _service.disable_retries() + self.test_unregister_callback_all_params() + + @responses.activate + def test_unregister_callback_value_error(self): + """ + test_unregister_callback_value_error() + """ + # Set up mock + url = preprocess_url('/v1/unregister_callback') + responses.add( + responses.POST, + url, + status=200, + ) + + # Set up parameter values + callback_url = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "callback_url": callback_url, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.unregister_callback(**req_copy) + + def test_unregister_callback_value_error_with_retries(self): + # Enable retries and run test_unregister_callback_value_error. + _service.enable_retries() + self.test_unregister_callback_value_error() + + # Disable retries and run test_unregister_callback_value_error. 
+ _service.disable_retries() + self.test_unregister_callback_value_error() + + +class TestCreateJob: + """ + Test Class for create_job + """ + + @responses.activate + def test_create_job_all_params(self): + """ + create_job() + """ + # Set up mock + url = preprocess_url('/v1/recognitions') + mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + audio = io.BytesIO(b'This is a mock file.').getvalue() + content_type = 'application/octet-stream' + model = 'en-US_BroadbandModel' + callback_url = 'testString' + events = 'recognitions.started' + user_token = 'testString' + results_ttl = 38 + speech_begin_event = False + enrichments = 'testString' + language_customization_id = 'testString' + acoustic_customization_id = 'testString' + base_model_version = 'testString' + customization_weight = 72.5 + inactivity_timeout = 30 + keywords = ['testString'] + keywords_threshold = 36.0 + max_alternatives = 1 + word_alternatives_threshold = 36.0 + word_confidence = False + timestamps = False + profanity_filter = True + smart_formatting = False + smart_formatting_version = 0 + speaker_labels = False + grammar_name = 'testString' + redaction = False + processing_metrics = False + processing_metrics_interval = 1.0 + audio_metrics = False + end_of_phrase_silence_time = 0.8 + split_transcript_at_phrase_end = False + speech_detector_sensitivity = 0.5 + sad_module = 1 + background_audio_suppression = 0.0 + low_latency = False + character_insertion_bias = 0.0 + + # Invoke method + response = _service.create_job( + audio, + content_type=content_type, + model=model, + callback_url=callback_url, + events=events, + user_token=user_token, + results_ttl=results_ttl, + speech_begin_event=speech_begin_event, + enrichments=enrichments, + language_customization_id=language_customization_id, + acoustic_customization_id=acoustic_customization_id, + base_model_version=base_model_version, + customization_weight=customization_weight, + inactivity_timeout=inactivity_timeout, + keywords=keywords, + 
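+ # (the remaining keyword arguments mirror recognize(); processing_metrics and processing_metrics_interval are the only additions specific to asynchronous jobs)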
keywords_threshold=keywords_threshold, + max_alternatives=max_alternatives, + word_alternatives_threshold=word_alternatives_threshold, + word_confidence=word_confidence, + timestamps=timestamps, + profanity_filter=profanity_filter, + smart_formatting=smart_formatting, + smart_formatting_version=smart_formatting_version, + speaker_labels=speaker_labels, + grammar_name=grammar_name, + redaction=redaction, + processing_metrics=processing_metrics, + processing_metrics_interval=processing_metrics_interval, + audio_metrics=audio_metrics, + end_of_phrase_silence_time=end_of_phrase_silence_time, + split_transcript_at_phrase_end=split_transcript_at_phrase_end, + speech_detector_sensitivity=speech_detector_sensitivity, + sad_module=sad_module, + background_audio_suppression=background_audio_suppression, + low_latency=low_latency, + character_insertion_bias=character_insertion_bias, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'model={}'.format(model) in query_string + assert 'callback_url={}'.format(callback_url) in query_string + assert 'events={}'.format(events) in query_string + assert 'user_token={}'.format(user_token) in query_string + assert 'results_ttl={}'.format(results_ttl) in query_string + assert 'speech_begin_event={}'.format('true' if speech_begin_event else 'false') in query_string + assert 'enrichments={}'.format(enrichments) in query_string + assert 'language_customization_id={}'.format(language_customization_id) in query_string + assert 'acoustic_customization_id={}'.format(acoustic_customization_id) in query_string + assert 'base_model_version={}'.format(base_model_version) in query_string + assert 'customization_weight={}'.format(customization_weight) in query_string + assert 'inactivity_timeout={}'.format(inactivity_timeout) in query_string + assert 'keywords={}'.format(','.join(keywords)) in query_string + assert 'max_alternatives={}'.format(max_alternatives) in query_string + assert 'word_confidence={}'.format('true' if word_confidence else 'false') in query_string + assert 'timestamps={}'.format('true' if timestamps else 'false') in query_string + assert 'profanity_filter={}'.format('true' if profanity_filter else 'false') in query_string + assert 'smart_formatting={}'.format('true' if smart_formatting else 'false') in query_string + assert 'smart_formatting_version={}'.format(smart_formatting_version) in query_string + assert 'speaker_labels={}'.format('true' if speaker_labels else 'false') in query_string + assert 'grammar_name={}'.format(grammar_name) in query_string + assert 'redaction={}'.format('true' if redaction else 'false') in query_string + assert 'processing_metrics={}'.format('true' if processing_metrics else 'false') in query_string + assert 'audio_metrics={}'.format('true' if audio_metrics else 'false') in query_string + assert 'end_of_phrase_silence_time={}'.format(end_of_phrase_silence_time) in query_string + assert 'split_transcript_at_phrase_end={}'.format('true' if split_transcript_at_phrase_end else 'false') in query_string + assert 'sad_module={}'.format(sad_module) in query_string + assert 'low_latency={}'.format('true' if low_latency else 'false') in query_string + # Validate body params + + def test_create_job_all_params_with_retries(self): + # Enable retries and run test_create_job_all_params. 
+ _service.enable_retries() + self.test_create_job_all_params() + + # Disable retries and run test_create_job_all_params. + _service.disable_retries() + self.test_create_job_all_params() + + @responses.activate + def test_create_job_required_params(self): + """ + test_create_job_required_params() + """ + # Set up mock + url = preprocess_url('/v1/recognitions') + mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + audio = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.create_job( + audio, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + + def test_create_job_required_params_with_retries(self): + # Enable retries and run test_create_job_required_params. + _service.enable_retries() + self.test_create_job_required_params() + + # Disable retries and run test_create_job_required_params. 
+ _service.disable_retries() + self.test_create_job_required_params() + + @responses.activate + def test_create_job_value_error(self): + """ + test_create_job_value_error() + """ + # Set up mock + url = preprocess_url('/v1/recognitions') + mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) - assert len(responses.calls) == 6 - -@responses.activate -def test_upgrade_acoustic_model(): - acoustic_customization_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/acoustic_customizations' - upgrade_url = "{0}/{1}/upgrade_model".format(acoustic_customization_url, 'customid') + # Set up parameter values + audio = io.BytesIO(b'This is a mock file.').getvalue() - responses.add( - responses.POST, - upgrade_url, - body='{"bogus_response": "yep"}', - status=200, - content_type='application/json') - - speech_to_text = ibm_watson.SpeechToTextV1( - username="username", password="password") + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "audio": audio, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_job(**req_copy) - speech_to_text.upgrade_acoustic_model( - 'customid', - 'model_x', - force=True) - assert responses.calls[0].response.json() == {"bogus_response": "yep"} - - assert len(responses.calls) == 1 - - -def test_custom_corpora(): + def test_create_job_value_error_with_retries(self): + # Enable retries and run test_create_job_value_error. + _service.enable_retries() + self.test_create_job_value_error() - corpora_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/{0}/corpora' - get_corpora_url = '{0}/{1}'.format( - corpora_url.format('customid'), 'corpus') + # Disable retries and run test_create_job_value_error. 
+ _service.disable_retries() + self.test_create_job_value_error() - with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps: - rsps.add( + +class TestCheckJobs: + """ + Test Class for check_jobs + """ + + @responses.activate + def test_check_jobs_all_params(self): + """ + check_jobs() + """ + # Set up mock + url = preprocess_url('/v1/recognitions') + mock_response = '{"recognitions": [{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.check_jobs() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_check_jobs_all_params_with_retries(self): + # Enable retries and run test_check_jobs_all_params. + _service.enable_retries() + self.test_check_jobs_all_params() + + # Disable retries and run test_check_jobs_all_params. 
+ _service.disable_retries() + self.test_check_jobs_all_params() + + +class TestCheckJob: + """ + Test Class for check_job + """ + + @responses.activate + def test_check_job_all_params(self): + """ + check_job() + """ + # Set up mock + url = preprocess_url('/v1/recognitions/testString') + mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + id = 'testString' + + # Invoke method + response = _service.check_job( + id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_check_job_all_params_with_retries(self): + # Enable retries and run test_check_job_all_params. + _service.enable_retries() + self.test_check_job_all_params() + + # Disable retries and run test_check_job_all_params. 
+ _service.disable_retries() + self.test_check_job_all_params() + + @responses.activate + def test_check_job_value_error(self): + """ + test_check_job_value_error() + """ + # Set up mock + url = preprocess_url('/v1/recognitions/testString') + mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}' + responses.add( responses.GET, - corpora_url.format('customid'), - body='{"get response": "yep"}', + url, + body=mock_response, + content_type='application/json', status=200, - content_type='application/json') + ) + + # Set up parameter values + id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "id": id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.check_job(**req_copy) + + def test_check_job_value_error_with_retries(self): + # Enable retries and run test_check_job_value_error. + _service.enable_retries() + self.test_check_job_value_error() + + # Disable retries and run test_check_job_value_error. + _service.disable_retries() + self.test_check_job_value_error() + + +class TestDeleteJob: + """ + Test Class for delete_job + """ + + @responses.activate + def test_delete_job_all_params(self): + """ + delete_job() + """ + # Set up mock + url = preprocess_url('/v1/recognitions/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + id = 'testString' + + # Invoke method + response = _service.delete_job( + id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_job_all_params_with_retries(self): + # Enable retries and run test_delete_job_all_params. + _service.enable_retries() + self.test_delete_job_all_params() + + # Disable retries and run test_delete_job_all_params. 
+ _service.disable_retries() + self.test_delete_job_all_params() - rsps.add( + @responses.activate + def test_delete_job_value_error(self): + """ + test_delete_job_value_error() + """ + # Set up mock + url = preprocess_url('/v1/recognitions/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "id": id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_job(**req_copy) + + def test_delete_job_value_error_with_retries(self): + # Enable retries and run test_delete_job_value_error. + _service.enable_retries() + self.test_delete_job_value_error() + + # Disable retries and run test_delete_job_value_error. + _service.disable_retries() + self.test_delete_job_value_error() + + +# endregion +############################################################################## +# End of Service: Asynchronous +############################################################################## + +############################################################################## +# Start of Service: CustomLanguageModels +############################################################################## +# region + + +class TestCreateLanguageModel: + """ + Test Class for create_language_model + """ + + @responses.activate + def test_create_language_model_all_params(self): + """ + create_language_model() + """ + # Set up mock + url = preprocess_url('/v1/customizations') + mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + name = 'testString' + base_model_name = 'ar-MS_Telephony' + dialect = 'testString' + description = 'testString' + + # Invoke method + response = _service.create_language_model( + name, + base_model_name, + dialect=dialect, + description=description, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['base_model_name'] == 'ar-MS_Telephony' + assert req_body['dialect'] == 'testString' + assert req_body['description'] == 'testString' + + def test_create_language_model_all_params_with_retries(self): + # Enable retries and run test_create_language_model_all_params. + _service.enable_retries() + self.test_create_language_model_all_params() + + # Disable retries and run test_create_language_model_all_params. 
+ _service.disable_retries() + self.test_create_language_model_all_params() + + @responses.activate + def test_create_language_model_value_error(self): + """ + test_create_language_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations') + mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}' + responses.add( responses.POST, - get_corpora_url, - body='{"get response": "yep"}', + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + name = 'testString' + base_model_name = 'ar-MS_Telephony' + dialect = 'testString' + description = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "name": name, + "base_model_name": base_model_name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_language_model(**req_copy) + + def test_create_language_model_value_error_with_retries(self): + # Enable retries and run test_create_language_model_value_error. + _service.enable_retries() + self.test_create_language_model_value_error() + + # Disable retries and run test_create_language_model_value_error. + _service.disable_retries() + self.test_create_language_model_value_error() + + +class TestListLanguageModels: + """ + Test Class for list_language_models + """ + + @responses.activate + def test_list_language_models_all_params(self): + """ + list_language_models() + """ + # Set up mock + url = preprocess_url('/v1/customizations') + mock_response = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + language = 'ar-MS' + + # Invoke method + response = _service.list_language_models( + language=language, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'language={}'.format(language) in query_string + + def test_list_language_models_all_params_with_retries(self): + # Enable retries and run test_list_language_models_all_params. + _service.enable_retries() + self.test_list_language_models_all_params() + + # Disable retries and run test_list_language_models_all_params. 
+ _service.disable_retries() + self.test_list_language_models_all_params() + + @responses.activate + def test_list_language_models_required_params(self): + """ + test_list_language_models_required_params() + """ + # Set up mock + url = preprocess_url('/v1/customizations') + mock_response = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_language_models() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_language_models_required_params_with_retries(self): + # Enable retries and run test_list_language_models_required_params. + _service.enable_retries() + self.test_list_language_models_required_params() + + # Disable retries and run test_list_language_models_required_params. + _service.disable_retries() + self.test_list_language_models_required_params() + + +class TestGetLanguageModel: + """ + Test Class for get_language_model + """ + + @responses.activate + def test_get_language_model_all_params(self): + """ + get_language_model() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString') + mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', status=200, - content_type='application/json') + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.get_language_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_language_model_all_params_with_retries(self): + # Enable retries and run test_get_language_model_all_params. + _service.enable_retries() + self.test_get_language_model_all_params() + + # Disable retries and run test_get_language_model_all_params. 
+ _service.disable_retries() + self.test_get_language_model_all_params() - rsps.add( + @responses.activate + def test_get_language_model_value_error(self): + """ + test_get_language_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString') + mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}' + responses.add( responses.GET, - get_corpora_url, - body='{"get response": "yep"}', + url, + body=mock_response, + content_type='application/json', status=200, - content_type='application/json') + ) - rsps.add( + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_language_model(**req_copy) + + def test_get_language_model_value_error_with_retries(self): + # Enable retries and run test_get_language_model_value_error. + _service.enable_retries() + self.test_get_language_model_value_error() + + # Disable retries and run test_get_language_model_value_error. + _service.disable_retries() + self.test_get_language_model_value_error() + + +class TestDeleteLanguageModel: + """ + Test Class for delete_language_model + """ + + @responses.activate + def test_delete_language_model_all_params(self): + """ + delete_language_model() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.delete_language_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_language_model_all_params_with_retries(self): + # Enable retries and run test_delete_language_model_all_params. + _service.enable_retries() + self.test_delete_language_model_all_params() + + # Disable retries and run test_delete_language_model_all_params. 
+ _service.disable_retries() + self.test_delete_language_model_all_params() + + @responses.activate + def test_delete_language_model_value_error(self): + """ + test_delete_language_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString') + responses.add( responses.DELETE, - get_corpora_url, - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - speech_to_text = ibm_watson.SpeechToTextV1( - username="username", password="password") - - speech_to_text.list_corpora(customization_id='customid') - - file_path = '../../resources/speech_to_text/corpus-short-1.txt' - full_path = os.path.join(os.path.dirname(__file__), file_path) - with open(full_path) as corpus_file: - speech_to_text.add_corpus( - customization_id='customid', - corpus_name="corpus", - corpus_file=corpus_file) - - speech_to_text.get_corpus( - customization_id='customid', corpus_name='corpus') - - speech_to_text.delete_corpus( - customization_id='customid', corpus_name='corpus') - - -@responses.activate -def test_custom_words(): - words_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/{0}/words' - word_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/{0}/words/{1}' - - responses.add( - responses.PUT, - word_url.format('custid', 'IEEE'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.PUT, - word_url.format('custid', 'wordname'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.DELETE, - word_url.format('custid', 'IEEE'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.DELETE, - word_url.format('custid', 'wordname'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.GET, - word_url.format('custid', 'IEEE'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.GET, - word_url.format('custid', 'wordname'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.POST, - words_url.format('custid'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - responses.add( - responses.GET, - words_url.format('custid'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - speech_to_text = ibm_watson.SpeechToTextV1( - username="username", password="password") - - custom_word = CustomWord( - word="IEEE", sounds_like=["i triple e"], display_as="IEEE") - - speech_to_text.add_word( - customization_id='custid', - word_name="IEEE", - sounds_like=["i triple e"], - display_as="IEEE") - - speech_to_text.delete_word(customization_id='custid', word_name="wordname") - - speech_to_text.delete_word(customization_id='custid', word_name='IEEE') - - custom_words = [custom_word, custom_word, custom_word] - speech_to_text.add_words( - customization_id='custid', - words=custom_words) - - speech_to_text.get_word(customization_id='custid', word_name="IEEE") - - speech_to_text.get_word(customization_id='custid', word_name='wordname') - - speech_to_text.list_words(customization_id='custid') - speech_to_text.list_words(customization_id='custid', sort='alphabetical') - - speech_to_text.list_words(customization_id='custid', word_type='all') - - assert len(responses.calls) == 9 - - -@responses.activate -def 
test_custom_audio_resources(): - url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/acoustic_customizations/{0}/audio/{1}' - - responses.add( - responses.POST, - url.format('custid', 'hiee'), - body='{"post response": "done"}', - status=200, - content_type='application/json') - - responses.add( - responses.DELETE, - url.format('custid', 'hiee'), - body='{"delete response": "done"}', - status=200, - content_type='application/json') - - responses.add( - responses.GET, - url.format('custid', 'hiee'), - body='{"get response": "done"}', - status=200, - content_type='application/json') - - responses.add( - responses.GET, - 'https://stream.watsonplatform.net/speech-to-text/api/v1/acoustic_customizations/custid/audio', - body='{"get response all": "done"}', - status=200, - content_type='application/json') - - speech_to_text = ibm_watson.SpeechToTextV1( - username="username", password="password") - - with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: - speech_to_text.add_audio( - customization_id='custid', - audio_name="hiee", - audio_resource=audio_file, - content_type="application/json") - assert responses.calls[0].response.json() == {"post response": "done"} - - speech_to_text.delete_audio('custid', 'hiee') - assert responses.calls[1].response.json() == {"delete response": "done"} - - speech_to_text.get_audio('custid', 'hiee') - assert responses.calls[2].response.json() == {"get response": "done"} - - speech_to_text.list_audio('custid') - assert responses.calls[3].response.json() == {"get response all": "done"} - -@responses.activate -def test_delete_user_data(): - url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/user_data' - responses.add( - responses.DELETE, - url, - body='{"description": "success" }', - status=204, - content_type='application_json') - - speech_to_text = ibm_watson.SpeechToTextV1(username="username", password="password") - response = speech_to_text.delete_user_data('id').get_result() - assert response is None - assert len(responses.calls) == 1 - -@responses.activate -def test_custom_grammars(): - url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/{0}/grammars/{1}' - - responses.add( - responses.POST, - url.format('customization_id', 'grammar_name'), - body='{}', - status=200, - content_type='application/json') - - responses.add( - responses.DELETE, - url.format('customization_id', 'grammar_name'), - status=200, - content_type='application/json') - - responses.add( - responses.GET, - url.format('customization_id', 'grammar_name'), - body='{"status": "analyzed", "name": "test-add-grammar-python", "out_of_vocabulary_words": 0}', - status=200, - content_type='application/json') - - responses.add( - responses.GET, - url='https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/customization_id/grammars', - body='{"grammars":[{"status": "analyzed", "name": "test-add-grammar-python", "out_of_vocabulary_words": 0}]}', - status=200, - content_type='application/json') - - speech_to_text = ibm_watson.SpeechToTextV1( - username="username", password="password") - - with open(os.path.join(os.path.dirname(__file__), '../../resources/confirm-grammar.xml'), 'rb') as grammar_file: - speech_to_text.add_grammar( - "customization_id", - grammar_name='grammar_name', - grammar_file=grammar_file, - content_type='application/srgs+xml', - allow_overwrite=True) - assert responses.calls[0].response.json() == {} - - speech_to_text.delete_grammar('customization_id', 'grammar_name') - assert 
responses.calls[1].response.status_code == 200 - - speech_to_text.get_grammar('customization_id', 'grammar_name') - assert responses.calls[2].response.json() == {"status": "analyzed", "name": "test-add-grammar-python", "out_of_vocabulary_words": 0} - - speech_to_text.list_grammars('customization_id') - assert responses.calls[3].response.json() == {"grammars":[{"status": "analyzed", "name": "test-add-grammar-python", "out_of_vocabulary_words": 0}]} + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_language_model(**req_copy) + + def test_delete_language_model_value_error_with_retries(self): + # Enable retries and run test_delete_language_model_value_error. + _service.enable_retries() + self.test_delete_language_model_value_error() + + # Disable retries and run test_delete_language_model_value_error. + _service.disable_retries() + self.test_delete_language_model_value_error() + + +class TestTrainLanguageModel: + """ + Test Class for train_language_model + """ + + @responses.activate + def test_train_language_model_all_params(self): + """ + train_language_model() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/train') + mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + word_type_to_add = 'all' + customization_weight = 72.5 + strict = True + force = False + + # Invoke method + response = _service.train_language_model( + customization_id, + word_type_to_add=word_type_to_add, + customization_weight=customization_weight, + strict=strict, + force=force, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'word_type_to_add={}'.format(word_type_to_add) in query_string + assert 'customization_weight={}'.format(customization_weight) in query_string + assert 'strict={}'.format('true' if strict else 'false') in query_string + assert 'force={}'.format('true' if force else 'false') in query_string + + def test_train_language_model_all_params_with_retries(self): + # Enable retries and run test_train_language_model_all_params. + _service.enable_retries() + self.test_train_language_model_all_params() + + # Disable retries and run test_train_language_model_all_params. 
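Outside of these mocked tests, train_language_model would be called against a live service and is usually followed by polling get_language_model until training completes. A rough usage sketch, assuming an IAM API key, a service URL, and an existing customization_id; the status values used for polling are assumptions, not taken from this diff.

    import time

    from ibm_watson import SpeechToTextV1
    from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

    authenticator = IAMAuthenticator('your-apikey')  # hypothetical credentials
    speech_to_text = SpeechToTextV1(authenticator=authenticator)
    speech_to_text.set_service_url('https://api.us-south.speech-to-text.watson.cloud.ibm.com')

    customization_id = 'your-customization-id'  # hypothetical model ID

    # Start training; the boolean arguments are serialized as 'true'/'false'
    # query parameters, as the assertions in the test above check.
    speech_to_text.train_language_model(
        customization_id,
        word_type_to_add='all',
        customization_weight=0.3,
        strict=True,
    )

    # Poll until the model leaves the training states (status names assumed).
    while True:
        model = speech_to_text.get_language_model(customization_id).get_result()
        if model['status'] not in ('pending', 'training'):
            break
        time.sleep(10)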
+ _service.disable_retries() + self.test_train_language_model_all_params() + + @responses.activate + def test_train_language_model_required_params(self): + """ + test_train_language_model_required_params() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/train') + mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.train_language_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_train_language_model_required_params_with_retries(self): + # Enable retries and run test_train_language_model_required_params. + _service.enable_retries() + self.test_train_language_model_required_params() + + # Disable retries and run test_train_language_model_required_params. + _service.disable_retries() + self.test_train_language_model_required_params() + + @responses.activate + def test_train_language_model_value_error(self): + """ + test_train_language_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/train') + mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.train_language_model(**req_copy) + + def test_train_language_model_value_error_with_retries(self): + # Enable retries and run test_train_language_model_value_error. + _service.enable_retries() + self.test_train_language_model_value_error() + + # Disable retries and run test_train_language_model_value_error. + _service.disable_retries() + self.test_train_language_model_value_error() + + +class TestResetLanguageModel: + """ + Test Class for reset_language_model + """ + + @responses.activate + def test_reset_language_model_all_params(self): + """ + reset_language_model() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/reset') + responses.add( + responses.POST, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.reset_language_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_reset_language_model_all_params_with_retries(self): + # Enable retries and run test_reset_language_model_all_params. + _service.enable_retries() + self.test_reset_language_model_all_params() + + # Disable retries and run test_reset_language_model_all_params. 
+ _service.disable_retries() + self.test_reset_language_model_all_params() + + @responses.activate + def test_reset_language_model_value_error(self): + """ + test_reset_language_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/reset') + responses.add( + responses.POST, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.reset_language_model(**req_copy) + + def test_reset_language_model_value_error_with_retries(self): + # Enable retries and run test_reset_language_model_value_error. + _service.enable_retries() + self.test_reset_language_model_value_error() + + # Disable retries and run test_reset_language_model_value_error. + _service.disable_retries() + self.test_reset_language_model_value_error() + + +class TestUpgradeLanguageModel: + """ + Test Class for upgrade_language_model + """ + + @responses.activate + def test_upgrade_language_model_all_params(self): + """ + upgrade_language_model() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/upgrade_model') + responses.add( + responses.POST, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.upgrade_language_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_upgrade_language_model_all_params_with_retries(self): + # Enable retries and run test_upgrade_language_model_all_params. + _service.enable_retries() + self.test_upgrade_language_model_all_params() + + # Disable retries and run test_upgrade_language_model_all_params. + _service.disable_retries() + self.test_upgrade_language_model_all_params() + + @responses.activate + def test_upgrade_language_model_value_error(self): + """ + test_upgrade_language_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/upgrade_model') + responses.add( + responses.POST, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.upgrade_language_model(**req_copy) + + def test_upgrade_language_model_value_error_with_retries(self): + # Enable retries and run test_upgrade_language_model_value_error. + _service.enable_retries() + self.test_upgrade_language_model_value_error() + + # Disable retries and run test_upgrade_language_model_value_error. 
+ _service.disable_retries() + self.test_upgrade_language_model_value_error() + + +# endregion +############################################################################## +# End of Service: CustomLanguageModels +############################################################################## + +############################################################################## +# Start of Service: CustomCorpora +############################################################################## +# region + + +class TestListCorpora: + """ + Test Class for list_corpora + """ + + @responses.activate + def test_list_corpora_all_params(self): + """ + list_corpora() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/corpora') + mock_response = '{"corpora": [{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.list_corpora( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_corpora_all_params_with_retries(self): + # Enable retries and run test_list_corpora_all_params. + _service.enable_retries() + self.test_list_corpora_all_params() + + # Disable retries and run test_list_corpora_all_params. + _service.disable_retries() + self.test_list_corpora_all_params() + + @responses.activate + def test_list_corpora_value_error(self): + """ + test_list_corpora_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/corpora') + mock_response = '{"corpora": [{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_corpora(**req_copy) + + def test_list_corpora_value_error_with_retries(self): + # Enable retries and run test_list_corpora_value_error. + _service.enable_retries() + self.test_list_corpora_value_error() + + # Disable retries and run test_list_corpora_value_error. 
+ _service.disable_retries() + self.test_list_corpora_value_error() + + +class TestAddCorpus: + """ + Test Class for add_corpus + """ + + @responses.activate + def test_add_corpus_all_params(self): + """ + add_corpus() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/corpora/testString') + responses.add( + responses.POST, + url, + status=201, + ) + + # Set up parameter values + customization_id = 'testString' + corpus_name = 'testString' + corpus_file = io.BytesIO(b'This is a mock file.').getvalue() + allow_overwrite = False + + # Invoke method + response = _service.add_corpus( + customization_id, + corpus_name, + corpus_file, + allow_overwrite=allow_overwrite, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'allow_overwrite={}'.format('true' if allow_overwrite else 'false') in query_string + + def test_add_corpus_all_params_with_retries(self): + # Enable retries and run test_add_corpus_all_params. + _service.enable_retries() + self.test_add_corpus_all_params() + + # Disable retries and run test_add_corpus_all_params. + _service.disable_retries() + self.test_add_corpus_all_params() + + @responses.activate + def test_add_corpus_required_params(self): + """ + test_add_corpus_required_params() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/corpora/testString') + responses.add( + responses.POST, + url, + status=201, + ) + + # Set up parameter values + customization_id = 'testString' + corpus_name = 'testString' + corpus_file = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.add_corpus( + customization_id, + corpus_name, + corpus_file, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_add_corpus_required_params_with_retries(self): + # Enable retries and run test_add_corpus_required_params. + _service.enable_retries() + self.test_add_corpus_required_params() + + # Disable retries and run test_add_corpus_required_params. + _service.disable_retries() + self.test_add_corpus_required_params() + + @responses.activate + def test_add_corpus_value_error(self): + """ + test_add_corpus_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/corpora/testString') + responses.add( + responses.POST, + url, + status=201, + ) + + # Set up parameter values + customization_id = 'testString' + corpus_name = 'testString' + corpus_file = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "corpus_name": corpus_name, + "corpus_file": corpus_file, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.add_corpus(**req_copy) + + def test_add_corpus_value_error_with_retries(self): + # Enable retries and run test_add_corpus_value_error. + _service.enable_retries() + self.test_add_corpus_value_error() + + # Disable retries and run test_add_corpus_value_error. 
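As a companion to the corpus tests above, this is roughly how the same endpoints would be driven against a live service; it assumes the speech_to_text client and customization_id set up in the earlier training sketch, and the file name is only a placeholder.

    # Assumes `speech_to_text` and `customization_id` from the earlier sketch.
    with open('corpus-short-1.txt', 'rb') as corpus_file:  # placeholder file name
        speech_to_text.add_corpus(
            customization_id,
            'corpus1',
            corpus_file,
            allow_overwrite=True,
        )

    corpora = speech_to_text.list_corpora(customization_id).get_result()
    print(corpora['corpora'])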
+ _service.disable_retries() + self.test_add_corpus_value_error() + + +class TestGetCorpus: + """ + Test Class for get_corpus + """ + + @responses.activate + def test_get_corpus_all_params(self): + """ + get_corpus() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/corpora/testString') + mock_response = '{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + corpus_name = 'testString' + + # Invoke method + response = _service.get_corpus( + customization_id, + corpus_name, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_corpus_all_params_with_retries(self): + # Enable retries and run test_get_corpus_all_params. + _service.enable_retries() + self.test_get_corpus_all_params() + + # Disable retries and run test_get_corpus_all_params. + _service.disable_retries() + self.test_get_corpus_all_params() + + @responses.activate + def test_get_corpus_value_error(self): + """ + test_get_corpus_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/corpora/testString') + mock_response = '{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + corpus_name = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "corpus_name": corpus_name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_corpus(**req_copy) + + def test_get_corpus_value_error_with_retries(self): + # Enable retries and run test_get_corpus_value_error. + _service.enable_retries() + self.test_get_corpus_value_error() + + # Disable retries and run test_get_corpus_value_error. + _service.disable_retries() + self.test_get_corpus_value_error() + + +class TestDeleteCorpus: + """ + Test Class for delete_corpus + """ + + @responses.activate + def test_delete_corpus_all_params(self): + """ + delete_corpus() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/corpora/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + corpus_name = 'testString' + + # Invoke method + response = _service.delete_corpus( + customization_id, + corpus_name, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_corpus_all_params_with_retries(self): + # Enable retries and run test_delete_corpus_all_params. + _service.enable_retries() + self.test_delete_corpus_all_params() + + # Disable retries and run test_delete_corpus_all_params. 
+ _service.disable_retries() + self.test_delete_corpus_all_params() + + @responses.activate + def test_delete_corpus_value_error(self): + """ + test_delete_corpus_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/corpora/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + corpus_name = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "corpus_name": corpus_name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_corpus(**req_copy) + + def test_delete_corpus_value_error_with_retries(self): + # Enable retries and run test_delete_corpus_value_error. + _service.enable_retries() + self.test_delete_corpus_value_error() + + # Disable retries and run test_delete_corpus_value_error. + _service.disable_retries() + self.test_delete_corpus_value_error() + + +# endregion +############################################################################## +# End of Service: CustomCorpora +############################################################################## + +############################################################################## +# Start of Service: CustomWords +############################################################################## +# region + + +class TestListWords: + """ + Test Class for list_words + """ + + @responses.activate + def test_list_words_all_params(self): + """ + list_words() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words') + mock_response = '{"words": [{"word": "word", "mapping_only": ["mapping_only"], "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + word_type = 'all' + sort = 'alphabetical' + + # Invoke method + response = _service.list_words( + customization_id, + word_type=word_type, + sort=sort, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'word_type={}'.format(word_type) in query_string + assert 'sort={}'.format(sort) in query_string + + def test_list_words_all_params_with_retries(self): + # Enable retries and run test_list_words_all_params. + _service.enable_retries() + self.test_list_words_all_params() + + # Disable retries and run test_list_words_all_params. 
+ _service.disable_retries() + self.test_list_words_all_params() + + @responses.activate + def test_list_words_required_params(self): + """ + test_list_words_required_params() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words') + mock_response = '{"words": [{"word": "word", "mapping_only": ["mapping_only"], "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.list_words( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_words_required_params_with_retries(self): + # Enable retries and run test_list_words_required_params. + _service.enable_retries() + self.test_list_words_required_params() + + # Disable retries and run test_list_words_required_params. + _service.disable_retries() + self.test_list_words_required_params() + + @responses.activate + def test_list_words_value_error(self): + """ + test_list_words_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words') + mock_response = '{"words": [{"word": "word", "mapping_only": ["mapping_only"], "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_words(**req_copy) + + def test_list_words_value_error_with_retries(self): + # Enable retries and run test_list_words_value_error. + _service.enable_retries() + self.test_list_words_value_error() + + # Disable retries and run test_list_words_value_error. + _service.disable_retries() + self.test_list_words_value_error() + + +class TestAddWords: + """ + Test Class for add_words + """ + + @responses.activate + def test_add_words_all_params(self): + """ + add_words() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words') + responses.add( + responses.POST, + url, + status=201, + ) + + # Construct a dict representation of a CustomWord model + custom_word_model = {} + custom_word_model['word'] = 'testString' + custom_word_model['mapping_only'] = ['testString'] + custom_word_model['sounds_like'] = ['testString'] + custom_word_model['display_as'] = 'testString' + + # Set up parameter values + customization_id = 'testString' + words = [custom_word_model] + + # Invoke method + response = _service.add_words( + customization_id, + words, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['words'] == [custom_word_model] + + def test_add_words_all_params_with_retries(self): + # Enable retries and run test_add_words_all_params. 
+ _service.enable_retries() + self.test_add_words_all_params() + + # Disable retries and run test_add_words_all_params. + _service.disable_retries() + self.test_add_words_all_params() + + @responses.activate + def test_add_words_value_error(self): + """ + test_add_words_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words') + responses.add( + responses.POST, + url, + status=201, + ) + + # Construct a dict representation of a CustomWord model + custom_word_model = {} + custom_word_model['word'] = 'testString' + custom_word_model['mapping_only'] = ['testString'] + custom_word_model['sounds_like'] = ['testString'] + custom_word_model['display_as'] = 'testString' + + # Set up parameter values + customization_id = 'testString' + words = [custom_word_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "words": words, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.add_words(**req_copy) + + def test_add_words_value_error_with_retries(self): + # Enable retries and run test_add_words_value_error. + _service.enable_retries() + self.test_add_words_value_error() + + # Disable retries and run test_add_words_value_error. + _service.disable_retries() + self.test_add_words_value_error() + + +class TestAddWord: + """ + Test Class for add_word + """ + + @responses.activate + def test_add_word_all_params(self): + """ + add_word() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + responses.add( + responses.PUT, + url, + status=201, + ) + + # Set up parameter values + customization_id = 'testString' + word_name = 'testString' + word = 'testString' + mapping_only = ['testString'] + sounds_like = ['testString'] + display_as = 'testString' + + # Invoke method + response = _service.add_word( + customization_id, + word_name, + word=word, + mapping_only=mapping_only, + sounds_like=sounds_like, + display_as=display_as, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['word'] == 'testString' + assert req_body['mapping_only'] == ['testString'] + assert req_body['sounds_like'] == ['testString'] + assert req_body['display_as'] == 'testString' + + def test_add_word_all_params_with_retries(self): + # Enable retries and run test_add_word_all_params. + _service.enable_retries() + self.test_add_word_all_params() + + # Disable retries and run test_add_word_all_params. 
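The word tests pass plain dicts for the CustomWord model, so a caller can add words without constructing model objects. A short sketch, again assuming the client and customization_id from the earlier sketch:

    # Assumes `speech_to_text` and `customization_id` from the earlier sketch.
    custom_word = {
        'word': 'IEEE',
        'sounds_like': ['i triple e'],
        'display_as': 'IEEE',
    }
    speech_to_text.add_words(customization_id, words=[custom_word])

    # Single-word variant of the same operation.
    speech_to_text.add_word(
        customization_id,
        word_name='IEEE',
        sounds_like=['i triple e'],
        display_as='IEEE',
    )

    words = speech_to_text.list_words(customization_id, sort='alphabetical').get_result()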
+ _service.disable_retries() + self.test_add_word_all_params() + + @responses.activate + def test_add_word_value_error(self): + """ + test_add_word_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + responses.add( + responses.PUT, + url, + status=201, + ) + + # Set up parameter values + customization_id = 'testString' + word_name = 'testString' + word = 'testString' + mapping_only = ['testString'] + sounds_like = ['testString'] + display_as = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "word_name": word_name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.add_word(**req_copy) + + def test_add_word_value_error_with_retries(self): + # Enable retries and run test_add_word_value_error. + _service.enable_retries() + self.test_add_word_value_error() + + # Disable retries and run test_add_word_value_error. + _service.disable_retries() + self.test_add_word_value_error() + + +class TestGetWord: + """ + Test Class for get_word + """ + + @responses.activate + def test_get_word_all_params(self): + """ + get_word() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + mock_response = '{"word": "word", "mapping_only": ["mapping_only"], "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + word_name = 'testString' + + # Invoke method + response = _service.get_word( + customization_id, + word_name, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_word_all_params_with_retries(self): + # Enable retries and run test_get_word_all_params. + _service.enable_retries() + self.test_get_word_all_params() + + # Disable retries and run test_get_word_all_params. + _service.disable_retries() + self.test_get_word_all_params() + + @responses.activate + def test_get_word_value_error(self): + """ + test_get_word_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + mock_response = '{"word": "word", "mapping_only": ["mapping_only"], "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + word_name = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "word_name": word_name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_word(**req_copy) + + def test_get_word_value_error_with_retries(self): + # Enable retries and run test_get_word_value_error. + _service.enable_retries() + self.test_get_word_value_error() + + # Disable retries and run test_get_word_value_error. 
+ _service.disable_retries() + self.test_get_word_value_error() + + +class TestDeleteWord: + """ + Test Class for delete_word + """ + + @responses.activate + def test_delete_word_all_params(self): + """ + delete_word() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + word_name = 'testString' + + # Invoke method + response = _service.delete_word( + customization_id, + word_name, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_word_all_params_with_retries(self): + # Enable retries and run test_delete_word_all_params. + _service.enable_retries() + self.test_delete_word_all_params() + + # Disable retries and run test_delete_word_all_params. + _service.disable_retries() + self.test_delete_word_all_params() + + @responses.activate + def test_delete_word_value_error(self): + """ + test_delete_word_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + word_name = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "word_name": word_name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_word(**req_copy) + + def test_delete_word_value_error_with_retries(self): + # Enable retries and run test_delete_word_value_error. + _service.enable_retries() + self.test_delete_word_value_error() + + # Disable retries and run test_delete_word_value_error. + _service.disable_retries() + self.test_delete_word_value_error() + + +# endregion +############################################################################## +# End of Service: CustomWords +############################################################################## + +############################################################################## +# Start of Service: CustomGrammars +############################################################################## +# region + + +class TestListGrammars: + """ + Test Class for list_grammars + """ + + @responses.activate + def test_list_grammars_all_params(self): + """ + list_grammars() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/grammars') + mock_response = '{"grammars": [{"name": "name", "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.list_grammars( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_grammars_all_params_with_retries(self): + # Enable retries and run test_list_grammars_all_params. + _service.enable_retries() + self.test_list_grammars_all_params() + + # Disable retries and run test_list_grammars_all_params. 
+ _service.disable_retries() + self.test_list_grammars_all_params() + + @responses.activate + def test_list_grammars_value_error(self): + """ + test_list_grammars_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/grammars') + mock_response = '{"grammars": [{"name": "name", "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_grammars(**req_copy) + + def test_list_grammars_value_error_with_retries(self): + # Enable retries and run test_list_grammars_value_error. + _service.enable_retries() + self.test_list_grammars_value_error() + + # Disable retries and run test_list_grammars_value_error. + _service.disable_retries() + self.test_list_grammars_value_error() + + +class TestAddGrammar: + """ + Test Class for add_grammar + """ + + @responses.activate + def test_add_grammar_all_params(self): + """ + add_grammar() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/grammars/testString') + responses.add( + responses.POST, + url, + status=201, + ) + + # Set up parameter values + customization_id = 'testString' + grammar_name = 'testString' + grammar_file = io.BytesIO(b'This is a mock file.').getvalue() + content_type = 'application/srgs' + allow_overwrite = False + + # Invoke method + response = _service.add_grammar( + customization_id, + grammar_name, + grammar_file, + content_type, + allow_overwrite=allow_overwrite, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'allow_overwrite={}'.format('true' if allow_overwrite else 'false') in query_string + # Validate body params + + def test_add_grammar_all_params_with_retries(self): + # Enable retries and run test_add_grammar_all_params. + _service.enable_retries() + self.test_add_grammar_all_params() + + # Disable retries and run test_add_grammar_all_params. + _service.disable_retries() + self.test_add_grammar_all_params() + + @responses.activate + def test_add_grammar_required_params(self): + """ + test_add_grammar_required_params() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/grammars/testString') + responses.add( + responses.POST, + url, + status=201, + ) + + # Set up parameter values + customization_id = 'testString' + grammar_name = 'testString' + grammar_file = io.BytesIO(b'This is a mock file.').getvalue() + content_type = 'application/srgs' + + # Invoke method + response = _service.add_grammar( + customization_id, + grammar_name, + grammar_file, + content_type, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + + def test_add_grammar_required_params_with_retries(self): + # Enable retries and run test_add_grammar_required_params. 
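For the grammar tests, both SRGS content types appear in this diff ('application/srgs' in the new tests, 'application/srgs+xml' for the XML grammar in the removed tests). A rough sketch of adding and fetching an XML grammar, assuming the client and customization_id from the earlier sketch and a placeholder file name:

    # Assumes `speech_to_text` and `customization_id` from the earlier sketch.
    with open('confirm-grammar.xml', 'rb') as grammar_file:  # placeholder file name
        speech_to_text.add_grammar(
            customization_id,
            'confirm-grammar',
            grammar_file,
            'application/srgs+xml',  # XML grammar; ABNF grammars use application/srgs
            allow_overwrite=True,
        )

    grammar = speech_to_text.get_grammar(customization_id, 'confirm-grammar').get_result()
    print(grammar['status'])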
+ _service.enable_retries() + self.test_add_grammar_required_params() + + # Disable retries and run test_add_grammar_required_params. + _service.disable_retries() + self.test_add_grammar_required_params() + + @responses.activate + def test_add_grammar_value_error(self): + """ + test_add_grammar_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/grammars/testString') + responses.add( + responses.POST, + url, + status=201, + ) + + # Set up parameter values + customization_id = 'testString' + grammar_name = 'testString' + grammar_file = io.BytesIO(b'This is a mock file.').getvalue() + content_type = 'application/srgs' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "grammar_name": grammar_name, + "grammar_file": grammar_file, + "content_type": content_type, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.add_grammar(**req_copy) + + def test_add_grammar_value_error_with_retries(self): + # Enable retries and run test_add_grammar_value_error. + _service.enable_retries() + self.test_add_grammar_value_error() + + # Disable retries and run test_add_grammar_value_error. + _service.disable_retries() + self.test_add_grammar_value_error() + + +class TestGetGrammar: + """ + Test Class for get_grammar + """ + + @responses.activate + def test_get_grammar_all_params(self): + """ + get_grammar() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/grammars/testString') + mock_response = '{"name": "name", "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + grammar_name = 'testString' + + # Invoke method + response = _service.get_grammar( + customization_id, + grammar_name, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_grammar_all_params_with_retries(self): + # Enable retries and run test_get_grammar_all_params. + _service.enable_retries() + self.test_get_grammar_all_params() + + # Disable retries and run test_get_grammar_all_params. + _service.disable_retries() + self.test_get_grammar_all_params() + + @responses.activate + def test_get_grammar_value_error(self): + """ + test_get_grammar_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/grammars/testString') + mock_response = '{"name": "name", "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + grammar_name = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "grammar_name": grammar_name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_grammar(**req_copy) + + def test_get_grammar_value_error_with_retries(self): + # Enable retries and run test_get_grammar_value_error. 
+ _service.enable_retries() + self.test_get_grammar_value_error() + + # Disable retries and run test_get_grammar_value_error. + _service.disable_retries() + self.test_get_grammar_value_error() + + +class TestDeleteGrammar: + """ + Test Class for delete_grammar + """ + + @responses.activate + def test_delete_grammar_all_params(self): + """ + delete_grammar() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/grammars/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + grammar_name = 'testString' + + # Invoke method + response = _service.delete_grammar( + customization_id, + grammar_name, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_grammar_all_params_with_retries(self): + # Enable retries and run test_delete_grammar_all_params. + _service.enable_retries() + self.test_delete_grammar_all_params() + + # Disable retries and run test_delete_grammar_all_params. + _service.disable_retries() + self.test_delete_grammar_all_params() + + @responses.activate + def test_delete_grammar_value_error(self): + """ + test_delete_grammar_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/grammars/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + grammar_name = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "grammar_name": grammar_name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_grammar(**req_copy) + + def test_delete_grammar_value_error_with_retries(self): + # Enable retries and run test_delete_grammar_value_error. + _service.enable_retries() + self.test_delete_grammar_value_error() + + # Disable retries and run test_delete_grammar_value_error. 
+ _service.disable_retries() + self.test_delete_grammar_value_error() + + +# endregion +############################################################################## +# End of Service: CustomGrammars +############################################################################## + +############################################################################## +# Start of Service: CustomAcousticModels +############################################################################## +# region + + +class TestCreateAcousticModel: + """ + Test Class for create_acoustic_model + """ + + @responses.activate + def test_create_acoustic_model_all_params(self): + """ + create_acoustic_model() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations') + mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + name = 'testString' + base_model_name = 'ar-MS_BroadbandModel' + description = 'testString' + + # Invoke method + response = _service.create_acoustic_model( + name, + base_model_name, + description=description, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['base_model_name'] == 'ar-MS_BroadbandModel' + assert req_body['description'] == 'testString' + + def test_create_acoustic_model_all_params_with_retries(self): + # Enable retries and run test_create_acoustic_model_all_params. + _service.enable_retries() + self.test_create_acoustic_model_all_params() + + # Disable retries and run test_create_acoustic_model_all_params. + _service.disable_retries() + self.test_create_acoustic_model_all_params() + + @responses.activate + def test_create_acoustic_model_value_error(self): + """ + test_create_acoustic_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations') + mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + name = 'testString' + base_model_name = 'ar-MS_BroadbandModel' + description = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "name": name, + "base_model_name": base_model_name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_acoustic_model(**req_copy) + + def test_create_acoustic_model_value_error_with_retries(self): + # Enable retries and run test_create_acoustic_model_value_error. 
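The acoustic-model region mirrors the language-model one; creating a model and attaching audio (add_audio appears in the removed tests above) would look roughly like this, assuming the client from the earlier sketch and placeholder names and files:

    # Assumes `speech_to_text` from the earlier sketch.
    acoustic_model = speech_to_text.create_acoustic_model(
        'my-acoustic-model',
        'ar-MS_BroadbandModel',
        description='example acoustic model',
    ).get_result()
    acoustic_id = acoustic_model['customization_id']

    with open('speech.wav', 'rb') as audio_file:  # placeholder audio file
        speech_to_text.add_audio(
            acoustic_id,
            'audio1',
            audio_file,
            content_type='audio/wav',
        )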
+ _service.enable_retries() + self.test_create_acoustic_model_value_error() + + # Disable retries and run test_create_acoustic_model_value_error. + _service.disable_retries() + self.test_create_acoustic_model_value_error() + + +class TestListAcousticModels: + """ + Test Class for list_acoustic_models + """ + + @responses.activate + def test_list_acoustic_models_all_params(self): + """ + list_acoustic_models() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations') + mock_response = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + language = 'ar-MS' + + # Invoke method + response = _service.list_acoustic_models( + language=language, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'language={}'.format(language) in query_string + + def test_list_acoustic_models_all_params_with_retries(self): + # Enable retries and run test_list_acoustic_models_all_params. + _service.enable_retries() + self.test_list_acoustic_models_all_params() + + # Disable retries and run test_list_acoustic_models_all_params. + _service.disable_retries() + self.test_list_acoustic_models_all_params() + + @responses.activate + def test_list_acoustic_models_required_params(self): + """ + test_list_acoustic_models_required_params() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations') + mock_response = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_acoustic_models() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_acoustic_models_required_params_with_retries(self): + # Enable retries and run test_list_acoustic_models_required_params. + _service.enable_retries() + self.test_list_acoustic_models_required_params() + + # Disable retries and run test_list_acoustic_models_required_params. 
+ _service.disable_retries() + self.test_list_acoustic_models_required_params() + + +class TestGetAcousticModel: + """ + Test Class for get_acoustic_model + """ + + @responses.activate + def test_get_acoustic_model_all_params(self): + """ + get_acoustic_model() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString') + mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.get_acoustic_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_acoustic_model_all_params_with_retries(self): + # Enable retries and run test_get_acoustic_model_all_params. + _service.enable_retries() + self.test_get_acoustic_model_all_params() + + # Disable retries and run test_get_acoustic_model_all_params. + _service.disable_retries() + self.test_get_acoustic_model_all_params() + + @responses.activate + def test_get_acoustic_model_value_error(self): + """ + test_get_acoustic_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString') + mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_acoustic_model(**req_copy) + + def test_get_acoustic_model_value_error_with_retries(self): + # Enable retries and run test_get_acoustic_model_value_error. + _service.enable_retries() + self.test_get_acoustic_model_value_error() + + # Disable retries and run test_get_acoustic_model_value_error. + _service.disable_retries() + self.test_get_acoustic_model_value_error() + + +class TestDeleteAcousticModel: + """ + Test Class for delete_acoustic_model + """ + + @responses.activate + def test_delete_acoustic_model_all_params(self): + """ + delete_acoustic_model() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.delete_acoustic_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_acoustic_model_all_params_with_retries(self): + # Enable retries and run test_delete_acoustic_model_all_params. 
+ _service.enable_retries() + self.test_delete_acoustic_model_all_params() + + # Disable retries and run test_delete_acoustic_model_all_params. + _service.disable_retries() + self.test_delete_acoustic_model_all_params() + + @responses.activate + def test_delete_acoustic_model_value_error(self): + """ + test_delete_acoustic_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_acoustic_model(**req_copy) + + def test_delete_acoustic_model_value_error_with_retries(self): + # Enable retries and run test_delete_acoustic_model_value_error. + _service.enable_retries() + self.test_delete_acoustic_model_value_error() + + # Disable retries and run test_delete_acoustic_model_value_error. + _service.disable_retries() + self.test_delete_acoustic_model_value_error() + + +class TestTrainAcousticModel: + """ + Test Class for train_acoustic_model + """ + + @responses.activate + def test_train_acoustic_model_all_params(self): + """ + train_acoustic_model() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/train') + mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + custom_language_model_id = 'testString' + strict = True + + # Invoke method + response = _service.train_acoustic_model( + customization_id, + custom_language_model_id=custom_language_model_id, + strict=strict, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'custom_language_model_id={}'.format(custom_language_model_id) in query_string + assert 'strict={}'.format('true' if strict else 'false') in query_string + + def test_train_acoustic_model_all_params_with_retries(self): + # Enable retries and run test_train_acoustic_model_all_params. + _service.enable_retries() + self.test_train_acoustic_model_all_params() + + # Disable retries and run test_train_acoustic_model_all_params. 
+ _service.disable_retries() + self.test_train_acoustic_model_all_params() + + @responses.activate + def test_train_acoustic_model_required_params(self): + """ + test_train_acoustic_model_required_params() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/train') + mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.train_acoustic_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_train_acoustic_model_required_params_with_retries(self): + # Enable retries and run test_train_acoustic_model_required_params. + _service.enable_retries() + self.test_train_acoustic_model_required_params() + + # Disable retries and run test_train_acoustic_model_required_params. + _service.disable_retries() + self.test_train_acoustic_model_required_params() + + @responses.activate + def test_train_acoustic_model_value_error(self): + """ + test_train_acoustic_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/train') + mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.train_acoustic_model(**req_copy) + + def test_train_acoustic_model_value_error_with_retries(self): + # Enable retries and run test_train_acoustic_model_value_error. + _service.enable_retries() + self.test_train_acoustic_model_value_error() + + # Disable retries and run test_train_acoustic_model_value_error. + _service.disable_retries() + self.test_train_acoustic_model_value_error() + + +class TestResetAcousticModel: + """ + Test Class for reset_acoustic_model + """ + + @responses.activate + def test_reset_acoustic_model_all_params(self): + """ + reset_acoustic_model() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/reset') + responses.add( + responses.POST, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.reset_acoustic_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_reset_acoustic_model_all_params_with_retries(self): + # Enable retries and run test_reset_acoustic_model_all_params. + _service.enable_retries() + self.test_reset_acoustic_model_all_params() + + # Disable retries and run test_reset_acoustic_model_all_params. 
+ _service.disable_retries() + self.test_reset_acoustic_model_all_params() + + @responses.activate + def test_reset_acoustic_model_value_error(self): + """ + test_reset_acoustic_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/reset') + responses.add( + responses.POST, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.reset_acoustic_model(**req_copy) + + def test_reset_acoustic_model_value_error_with_retries(self): + # Enable retries and run test_reset_acoustic_model_value_error. + _service.enable_retries() + self.test_reset_acoustic_model_value_error() + + # Disable retries and run test_reset_acoustic_model_value_error. + _service.disable_retries() + self.test_reset_acoustic_model_value_error() + + +class TestUpgradeAcousticModel: + """ + Test Class for upgrade_acoustic_model + """ + + @responses.activate + def test_upgrade_acoustic_model_all_params(self): + """ + upgrade_acoustic_model() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/upgrade_model') + responses.add( + responses.POST, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + custom_language_model_id = 'testString' + force = False + + # Invoke method + response = _service.upgrade_acoustic_model( + customization_id, + custom_language_model_id=custom_language_model_id, + force=force, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'custom_language_model_id={}'.format(custom_language_model_id) in query_string + assert 'force={}'.format('true' if force else 'false') in query_string + + def test_upgrade_acoustic_model_all_params_with_retries(self): + # Enable retries and run test_upgrade_acoustic_model_all_params. + _service.enable_retries() + self.test_upgrade_acoustic_model_all_params() + + # Disable retries and run test_upgrade_acoustic_model_all_params. + _service.disable_retries() + self.test_upgrade_acoustic_model_all_params() + + @responses.activate + def test_upgrade_acoustic_model_required_params(self): + """ + test_upgrade_acoustic_model_required_params() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/upgrade_model') + responses.add( + responses.POST, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.upgrade_acoustic_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_upgrade_acoustic_model_required_params_with_retries(self): + # Enable retries and run test_upgrade_acoustic_model_required_params. + _service.enable_retries() + self.test_upgrade_acoustic_model_required_params() + + # Disable retries and run test_upgrade_acoustic_model_required_params. 
+ _service.disable_retries() + self.test_upgrade_acoustic_model_required_params() + + @responses.activate + def test_upgrade_acoustic_model_value_error(self): + """ + test_upgrade_acoustic_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/upgrade_model') + responses.add( + responses.POST, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.upgrade_acoustic_model(**req_copy) + + def test_upgrade_acoustic_model_value_error_with_retries(self): + # Enable retries and run test_upgrade_acoustic_model_value_error. + _service.enable_retries() + self.test_upgrade_acoustic_model_value_error() + + # Disable retries and run test_upgrade_acoustic_model_value_error. + _service.disable_retries() + self.test_upgrade_acoustic_model_value_error() + + +# endregion +############################################################################## +# End of Service: CustomAcousticModels +############################################################################## + +############################################################################## +# Start of Service: CustomAudioResources +############################################################################## +# region + + +class TestListAudio: + """ + Test Class for list_audio + """ + + @responses.activate + def test_list_audio_all_params(self): + """ + list_audio() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/audio') + mock_response = '{"total_minutes_of_audio": 22, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.list_audio( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_audio_all_params_with_retries(self): + # Enable retries and run test_list_audio_all_params. + _service.enable_retries() + self.test_list_audio_all_params() + + # Disable retries and run test_list_audio_all_params. 
+ _service.disable_retries() + self.test_list_audio_all_params() + + @responses.activate + def test_list_audio_value_error(self): + """ + test_list_audio_value_error() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/audio') + mock_response = '{"total_minutes_of_audio": 22, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_audio(**req_copy) + + def test_list_audio_value_error_with_retries(self): + # Enable retries and run test_list_audio_value_error. + _service.enable_retries() + self.test_list_audio_value_error() + + # Disable retries and run test_list_audio_value_error. + _service.disable_retries() + self.test_list_audio_value_error() + + +class TestAddAudio: + """ + Test Class for add_audio + """ + + @responses.activate + def test_add_audio_all_params(self): + """ + add_audio() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString') + responses.add( + responses.POST, + url, + status=201, + ) + + # Set up parameter values + customization_id = 'testString' + audio_name = 'testString' + audio_resource = io.BytesIO(b'This is a mock file.').getvalue() + content_type = 'application/zip' + contained_content_type = 'audio/alaw' + allow_overwrite = False + + # Invoke method + response = _service.add_audio( + customization_id, + audio_name, + audio_resource, + content_type=content_type, + contained_content_type=contained_content_type, + allow_overwrite=allow_overwrite, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'allow_overwrite={}'.format('true' if allow_overwrite else 'false') in query_string + # Validate body params + + def test_add_audio_all_params_with_retries(self): + # Enable retries and run test_add_audio_all_params. + _service.enable_retries() + self.test_add_audio_all_params() + + # Disable retries and run test_add_audio_all_params. + _service.disable_retries() + self.test_add_audio_all_params() + + @responses.activate + def test_add_audio_required_params(self): + """ + test_add_audio_required_params() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString') + responses.add( + responses.POST, + url, + status=201, + ) + + # Set up parameter values + customization_id = 'testString' + audio_name = 'testString' + audio_resource = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.add_audio( + customization_id, + audio_name, + audio_resource, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + + def test_add_audio_required_params_with_retries(self): + # Enable retries and run test_add_audio_required_params. 
+ _service.enable_retries() + self.test_add_audio_required_params() + + # Disable retries and run test_add_audio_required_params. + _service.disable_retries() + self.test_add_audio_required_params() + + @responses.activate + def test_add_audio_value_error(self): + """ + test_add_audio_value_error() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString') + responses.add( + responses.POST, + url, + status=201, + ) + + # Set up parameter values + customization_id = 'testString' + audio_name = 'testString' + audio_resource = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "audio_name": audio_name, + "audio_resource": audio_resource, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.add_audio(**req_copy) + + def test_add_audio_value_error_with_retries(self): + # Enable retries and run test_add_audio_value_error. + _service.enable_retries() + self.test_add_audio_value_error() + + # Disable retries and run test_add_audio_value_error. + _service.disable_retries() + self.test_add_audio_value_error() + + +class TestGetAudio: + """ + Test Class for get_audio + """ + + @responses.activate + def test_get_audio_all_params(self): + """ + get_audio() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString') + mock_response = '{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok", "container": {"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + audio_name = 'testString' + + # Invoke method + response = _service.get_audio( + customization_id, + audio_name, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_audio_all_params_with_retries(self): + # Enable retries and run test_get_audio_all_params. + _service.enable_retries() + self.test_get_audio_all_params() + + # Disable retries and run test_get_audio_all_params. 
+ _service.disable_retries() + self.test_get_audio_all_params() + + @responses.activate + def test_get_audio_value_error(self): + """ + test_get_audio_value_error() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString') + mock_response = '{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok", "container": {"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + audio_name = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "audio_name": audio_name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_audio(**req_copy) + + def test_get_audio_value_error_with_retries(self): + # Enable retries and run test_get_audio_value_error. + _service.enable_retries() + self.test_get_audio_value_error() + + # Disable retries and run test_get_audio_value_error. + _service.disable_retries() + self.test_get_audio_value_error() + + +class TestDeleteAudio: + """ + Test Class for delete_audio + """ + + @responses.activate + def test_delete_audio_all_params(self): + """ + delete_audio() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + audio_name = 'testString' + + # Invoke method + response = _service.delete_audio( + customization_id, + audio_name, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_delete_audio_all_params_with_retries(self): + # Enable retries and run test_delete_audio_all_params. + _service.enable_retries() + self.test_delete_audio_all_params() + + # Disable retries and run test_delete_audio_all_params. + _service.disable_retries() + self.test_delete_audio_all_params() + + @responses.activate + def test_delete_audio_value_error(self): + """ + test_delete_audio_value_error() + """ + # Set up mock + url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + audio_name = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "audio_name": audio_name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_audio(**req_copy) + + def test_delete_audio_value_error_with_retries(self): + # Enable retries and run test_delete_audio_value_error. + _service.enable_retries() + self.test_delete_audio_value_error() + + # Disable retries and run test_delete_audio_value_error. 
+ _service.disable_retries() + self.test_delete_audio_value_error() + + +# endregion +############################################################################## +# End of Service: CustomAudioResources +############################################################################## + +############################################################################## +# Start of Service: UserData +############################################################################## +# region + + +class TestDeleteUserData: + """ + Test Class for delete_user_data + """ + + @responses.activate + def test_delete_user_data_all_params(self): + """ + delete_user_data() + """ + # Set up mock + url = preprocess_url('/v1/user_data') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customer_id = 'testString' + + # Invoke method + response = _service.delete_user_data( + customer_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'customer_id={}'.format(customer_id) in query_string + + def test_delete_user_data_all_params_with_retries(self): + # Enable retries and run test_delete_user_data_all_params. + _service.enable_retries() + self.test_delete_user_data_all_params() + + # Disable retries and run test_delete_user_data_all_params. + _service.disable_retries() + self.test_delete_user_data_all_params() + + @responses.activate + def test_delete_user_data_value_error(self): + """ + test_delete_user_data_value_error() + """ + # Set up mock + url = preprocess_url('/v1/user_data') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customer_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customer_id": customer_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_user_data(**req_copy) + + def test_delete_user_data_value_error_with_retries(self): + # Enable retries and run test_delete_user_data_value_error. + _service.enable_retries() + self.test_delete_user_data_value_error() + + # Disable retries and run test_delete_user_data_value_error. 
+ _service.disable_retries() + self.test_delete_user_data_value_error() + + +# endregion +############################################################################## +# End of Service: UserData +############################################################################## + +############################################################################## +# Start of Service: LanguageIdentification +############################################################################## +# region + + +class TestDetectLanguage: + """ + Test Class for detect_language + """ + + @responses.activate + def test_detect_language_all_params(self): + """ + detect_language() + """ + # Set up mock + url = preprocess_url('/v1/detect_language') + mock_response = '{"results": [{"language_info": [{"confidence": 10, "language": "language", "timestamp": 9}]}], "result_index": 12}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + lid_confidence = 36.0 + audio = io.BytesIO(b'This is a mock file.').getvalue() + content_type = 'application/octet-stream' + + # Invoke method + response = _service.detect_language( + lid_confidence, + audio, + content_type=content_type, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + # Validate body params + + def test_detect_language_all_params_with_retries(self): + # Enable retries and run test_detect_language_all_params. + _service.enable_retries() + self.test_detect_language_all_params() + + # Disable retries and run test_detect_language_all_params. + _service.disable_retries() + self.test_detect_language_all_params() + + @responses.activate + def test_detect_language_required_params(self): + """ + test_detect_language_required_params() + """ + # Set up mock + url = preprocess_url('/v1/detect_language') + mock_response = '{"results": [{"language_info": [{"confidence": 10, "language": "language", "timestamp": 9}]}], "result_index": 12}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + lid_confidence = 36.0 + audio = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.detect_language( + lid_confidence, + audio, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + # Validate body params + + def test_detect_language_required_params_with_retries(self): + # Enable retries and run test_detect_language_required_params. + _service.enable_retries() + self.test_detect_language_required_params() + + # Disable retries and run test_detect_language_required_params. 
+ _service.disable_retries() + self.test_detect_language_required_params() + + @responses.activate + def test_detect_language_value_error(self): + """ + test_detect_language_value_error() + """ + # Set up mock + url = preprocess_url('/v1/detect_language') + mock_response = '{"results": [{"language_info": [{"confidence": 10, "language": "language", "timestamp": 9}]}], "result_index": 12}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + lid_confidence = 36.0 + audio = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "lid_confidence": lid_confidence, + "audio": audio, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.detect_language(**req_copy) + + def test_detect_language_value_error_with_retries(self): + # Enable retries and run test_detect_language_value_error. + _service.enable_retries() + self.test_detect_language_value_error() + + # Disable retries and run test_detect_language_value_error. + _service.disable_retries() + self.test_detect_language_value_error() + + +# endregion +############################################################################## +# End of Service: LanguageIdentification +############################################################################## + + +############################################################################## +# Start of Model Tests +############################################################################## +# region + + +class TestModel_AcousticModel: + """ + Test Class for AcousticModel + """ + + def test_acoustic_model_serialization(self): + """ + Test serialization/deserialization for AcousticModel + """ + + # Construct a json representation of a AcousticModel model + acoustic_model_model_json = {} + acoustic_model_model_json['customization_id'] = 'testString' + acoustic_model_model_json['created'] = 'testString' + acoustic_model_model_json['updated'] = 'testString' + acoustic_model_model_json['language'] = 'testString' + acoustic_model_model_json['versions'] = ['testString'] + acoustic_model_model_json['owner'] = 'testString' + acoustic_model_model_json['name'] = 'testString' + acoustic_model_model_json['description'] = 'testString' + acoustic_model_model_json['base_model_name'] = 'testString' + acoustic_model_model_json['status'] = 'pending' + acoustic_model_model_json['progress'] = 38 + acoustic_model_model_json['warnings'] = 'testString' + + # Construct a model instance of AcousticModel by calling from_dict on the json representation + acoustic_model_model = AcousticModel.from_dict(acoustic_model_model_json) + assert acoustic_model_model != False + + # Construct a model instance of AcousticModel by calling from_dict on the json representation + acoustic_model_model_dict = AcousticModel.from_dict(acoustic_model_model_json).__dict__ + acoustic_model_model2 = AcousticModel(**acoustic_model_model_dict) + + # Verify the model instances are equivalent + assert acoustic_model_model == acoustic_model_model2 + + # Convert model instance back to dict and verify no loss of data + acoustic_model_model_json2 = acoustic_model_model.to_dict() + assert acoustic_model_model_json2 == acoustic_model_model_json + + +class TestModel_AcousticModels: + """ + Test Class for AcousticModels + """ + + def 
test_acoustic_models_serialization(self): + """ + Test serialization/deserialization for AcousticModels + """ + + # Construct dict forms of any model objects needed in order to build this model. + + acoustic_model_model = {} # AcousticModel + acoustic_model_model['customization_id'] = 'testString' + acoustic_model_model['created'] = 'testString' + acoustic_model_model['updated'] = 'testString' + acoustic_model_model['language'] = 'testString' + acoustic_model_model['versions'] = ['testString'] + acoustic_model_model['owner'] = 'testString' + acoustic_model_model['name'] = 'testString' + acoustic_model_model['description'] = 'testString' + acoustic_model_model['base_model_name'] = 'testString' + acoustic_model_model['status'] = 'pending' + acoustic_model_model['progress'] = 38 + acoustic_model_model['warnings'] = 'testString' + + # Construct a json representation of a AcousticModels model + acoustic_models_model_json = {} + acoustic_models_model_json['customizations'] = [acoustic_model_model] + + # Construct a model instance of AcousticModels by calling from_dict on the json representation + acoustic_models_model = AcousticModels.from_dict(acoustic_models_model_json) + assert acoustic_models_model != False + + # Construct a model instance of AcousticModels by calling from_dict on the json representation + acoustic_models_model_dict = AcousticModels.from_dict(acoustic_models_model_json).__dict__ + acoustic_models_model2 = AcousticModels(**acoustic_models_model_dict) + + # Verify the model instances are equivalent + assert acoustic_models_model == acoustic_models_model2 + + # Convert model instance back to dict and verify no loss of data + acoustic_models_model_json2 = acoustic_models_model.to_dict() + assert acoustic_models_model_json2 == acoustic_models_model_json + + +class TestModel_AudioDetails: + """ + Test Class for AudioDetails + """ + + def test_audio_details_serialization(self): + """ + Test serialization/deserialization for AudioDetails + """ + + # Construct a json representation of a AudioDetails model + audio_details_model_json = {} + audio_details_model_json['type'] = 'audio' + audio_details_model_json['codec'] = 'testString' + audio_details_model_json['frequency'] = 38 + audio_details_model_json['compression'] = 'zip' + + # Construct a model instance of AudioDetails by calling from_dict on the json representation + audio_details_model = AudioDetails.from_dict(audio_details_model_json) + assert audio_details_model != False + + # Construct a model instance of AudioDetails by calling from_dict on the json representation + audio_details_model_dict = AudioDetails.from_dict(audio_details_model_json).__dict__ + audio_details_model2 = AudioDetails(**audio_details_model_dict) + + # Verify the model instances are equivalent + assert audio_details_model == audio_details_model2 + + # Convert model instance back to dict and verify no loss of data + audio_details_model_json2 = audio_details_model.to_dict() + assert audio_details_model_json2 == audio_details_model_json + + +class TestModel_AudioListing: + """ + Test Class for AudioListing + """ + + def test_audio_listing_serialization(self): + """ + Test serialization/deserialization for AudioListing + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + audio_details_model = {} # AudioDetails + audio_details_model['type'] = 'audio' + audio_details_model['codec'] = 'testString' + audio_details_model['frequency'] = 38 + audio_details_model['compression'] = 'zip' + + audio_resource_model = {} # AudioResource + audio_resource_model['duration'] = 38 + audio_resource_model['name'] = 'testString' + audio_resource_model['details'] = audio_details_model + audio_resource_model['status'] = 'ok' + + # Construct a json representation of a AudioListing model + audio_listing_model_json = {} + audio_listing_model_json['duration'] = 38 + audio_listing_model_json['name'] = 'testString' + audio_listing_model_json['details'] = audio_details_model + audio_listing_model_json['status'] = 'ok' + audio_listing_model_json['container'] = audio_resource_model + audio_listing_model_json['audio'] = [audio_resource_model] + + # Construct a model instance of AudioListing by calling from_dict on the json representation + audio_listing_model = AudioListing.from_dict(audio_listing_model_json) + assert audio_listing_model != False + + # Construct a model instance of AudioListing by calling from_dict on the json representation + audio_listing_model_dict = AudioListing.from_dict(audio_listing_model_json).__dict__ + audio_listing_model2 = AudioListing(**audio_listing_model_dict) + + # Verify the model instances are equivalent + assert audio_listing_model == audio_listing_model2 + + # Convert model instance back to dict and verify no loss of data + audio_listing_model_json2 = audio_listing_model.to_dict() + assert audio_listing_model_json2 == audio_listing_model_json + + +class TestModel_AudioMetrics: + """ + Test Class for AudioMetrics + """ + + def test_audio_metrics_serialization(self): + """ + Test serialization/deserialization for AudioMetrics + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + audio_metrics_histogram_bin_model = {} # AudioMetricsHistogramBin + audio_metrics_histogram_bin_model['begin'] = 36.0 + audio_metrics_histogram_bin_model['end'] = 36.0 + audio_metrics_histogram_bin_model['count'] = 38 + + audio_metrics_details_model = {} # AudioMetricsDetails + audio_metrics_details_model['final'] = True + audio_metrics_details_model['end_time'] = 36.0 + audio_metrics_details_model['signal_to_noise_ratio'] = 36.0 + audio_metrics_details_model['speech_ratio'] = 36.0 + audio_metrics_details_model['high_frequency_loss'] = 36.0 + audio_metrics_details_model['direct_current_offset'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['clipping_rate'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['speech_level'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['non_speech_level'] = [audio_metrics_histogram_bin_model] + + # Construct a json representation of a AudioMetrics model + audio_metrics_model_json = {} + audio_metrics_model_json['sampling_interval'] = 36.0 + audio_metrics_model_json['accumulated'] = audio_metrics_details_model + + # Construct a model instance of AudioMetrics by calling from_dict on the json representation + audio_metrics_model = AudioMetrics.from_dict(audio_metrics_model_json) + assert audio_metrics_model != False + + # Construct a model instance of AudioMetrics by calling from_dict on the json representation + audio_metrics_model_dict = AudioMetrics.from_dict(audio_metrics_model_json).__dict__ + audio_metrics_model2 = AudioMetrics(**audio_metrics_model_dict) + + # Verify the model instances are equivalent + assert audio_metrics_model == audio_metrics_model2 + + # Convert model instance back to dict and verify no loss of data + audio_metrics_model_json2 = audio_metrics_model.to_dict() + assert audio_metrics_model_json2 == audio_metrics_model_json + + +class TestModel_AudioMetricsDetails: + """ + Test Class for AudioMetricsDetails + """ + + def test_audio_metrics_details_serialization(self): + """ + Test serialization/deserialization for AudioMetricsDetails + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + audio_metrics_histogram_bin_model = {} # AudioMetricsHistogramBin + audio_metrics_histogram_bin_model['begin'] = 36.0 + audio_metrics_histogram_bin_model['end'] = 36.0 + audio_metrics_histogram_bin_model['count'] = 38 + + # Construct a json representation of a AudioMetricsDetails model + audio_metrics_details_model_json = {} + audio_metrics_details_model_json['final'] = True + audio_metrics_details_model_json['end_time'] = 36.0 + audio_metrics_details_model_json['signal_to_noise_ratio'] = 36.0 + audio_metrics_details_model_json['speech_ratio'] = 36.0 + audio_metrics_details_model_json['high_frequency_loss'] = 36.0 + audio_metrics_details_model_json['direct_current_offset'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model_json['clipping_rate'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model_json['speech_level'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model_json['non_speech_level'] = [audio_metrics_histogram_bin_model] + + # Construct a model instance of AudioMetricsDetails by calling from_dict on the json representation + audio_metrics_details_model = AudioMetricsDetails.from_dict(audio_metrics_details_model_json) + assert audio_metrics_details_model != False + + # Construct a model instance of AudioMetricsDetails by calling from_dict on the json representation + audio_metrics_details_model_dict = AudioMetricsDetails.from_dict(audio_metrics_details_model_json).__dict__ + audio_metrics_details_model2 = AudioMetricsDetails(**audio_metrics_details_model_dict) + + # Verify the model instances are equivalent + assert audio_metrics_details_model == audio_metrics_details_model2 + + # Convert model instance back to dict and verify no loss of data + audio_metrics_details_model_json2 = audio_metrics_details_model.to_dict() + assert audio_metrics_details_model_json2 == audio_metrics_details_model_json + + +class TestModel_AudioMetricsHistogramBin: + """ + Test Class for AudioMetricsHistogramBin + """ + + def test_audio_metrics_histogram_bin_serialization(self): + """ + Test serialization/deserialization for AudioMetricsHistogramBin + """ + + # Construct a json representation of a AudioMetricsHistogramBin model + audio_metrics_histogram_bin_model_json = {} + audio_metrics_histogram_bin_model_json['begin'] = 36.0 + audio_metrics_histogram_bin_model_json['end'] = 36.0 + audio_metrics_histogram_bin_model_json['count'] = 38 + + # Construct a model instance of AudioMetricsHistogramBin by calling from_dict on the json representation + audio_metrics_histogram_bin_model = AudioMetricsHistogramBin.from_dict(audio_metrics_histogram_bin_model_json) + assert audio_metrics_histogram_bin_model != False + + # Construct a model instance of AudioMetricsHistogramBin by calling from_dict on the json representation + audio_metrics_histogram_bin_model_dict = AudioMetricsHistogramBin.from_dict(audio_metrics_histogram_bin_model_json).__dict__ + audio_metrics_histogram_bin_model2 = AudioMetricsHistogramBin(**audio_metrics_histogram_bin_model_dict) + + # Verify the model instances are equivalent + assert audio_metrics_histogram_bin_model == audio_metrics_histogram_bin_model2 + + # Convert model instance back to dict and verify no loss of data + audio_metrics_histogram_bin_model_json2 = audio_metrics_histogram_bin_model.to_dict() + assert audio_metrics_histogram_bin_model_json2 == audio_metrics_histogram_bin_model_json + + +class TestModel_AudioResource: + """ + Test Class for AudioResource + """ + + def test_audio_resource_serialization(self): + """ + Test 
serialization/deserialization for AudioResource + """ + + # Construct dict forms of any model objects needed in order to build this model. + + audio_details_model = {} # AudioDetails + audio_details_model['type'] = 'audio' + audio_details_model['codec'] = 'testString' + audio_details_model['frequency'] = 38 + audio_details_model['compression'] = 'zip' + + # Construct a json representation of a AudioResource model + audio_resource_model_json = {} + audio_resource_model_json['duration'] = 38 + audio_resource_model_json['name'] = 'testString' + audio_resource_model_json['details'] = audio_details_model + audio_resource_model_json['status'] = 'ok' + + # Construct a model instance of AudioResource by calling from_dict on the json representation + audio_resource_model = AudioResource.from_dict(audio_resource_model_json) + assert audio_resource_model != False + + # Construct a model instance of AudioResource by calling from_dict on the json representation + audio_resource_model_dict = AudioResource.from_dict(audio_resource_model_json).__dict__ + audio_resource_model2 = AudioResource(**audio_resource_model_dict) + + # Verify the model instances are equivalent + assert audio_resource_model == audio_resource_model2 + + # Convert model instance back to dict and verify no loss of data + audio_resource_model_json2 = audio_resource_model.to_dict() + assert audio_resource_model_json2 == audio_resource_model_json + + +class TestModel_AudioResources: + """ + Test Class for AudioResources + """ + + def test_audio_resources_serialization(self): + """ + Test serialization/deserialization for AudioResources + """ + + # Construct dict forms of any model objects needed in order to build this model. + + audio_details_model = {} # AudioDetails + audio_details_model['type'] = 'audio' + audio_details_model['codec'] = 'testString' + audio_details_model['frequency'] = 38 + audio_details_model['compression'] = 'zip' + + audio_resource_model = {} # AudioResource + audio_resource_model['duration'] = 38 + audio_resource_model['name'] = 'testString' + audio_resource_model['details'] = audio_details_model + audio_resource_model['status'] = 'ok' + + # Construct a json representation of a AudioResources model + audio_resources_model_json = {} + audio_resources_model_json['total_minutes_of_audio'] = 72.5 + audio_resources_model_json['audio'] = [audio_resource_model] + + # Construct a model instance of AudioResources by calling from_dict on the json representation + audio_resources_model = AudioResources.from_dict(audio_resources_model_json) + assert audio_resources_model != False + + # Construct a model instance of AudioResources by calling from_dict on the json representation + audio_resources_model_dict = AudioResources.from_dict(audio_resources_model_json).__dict__ + audio_resources_model2 = AudioResources(**audio_resources_model_dict) + + # Verify the model instances are equivalent + assert audio_resources_model == audio_resources_model2 + + # Convert model instance back to dict and verify no loss of data + audio_resources_model_json2 = audio_resources_model.to_dict() + assert audio_resources_model_json2 == audio_resources_model_json + + +class TestModel_Corpora: + """ + Test Class for Corpora + """ + + def test_corpora_serialization(self): + """ + Test serialization/deserialization for Corpora + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + corpus_model = {} # Corpus + corpus_model['name'] = 'testString' + corpus_model['total_words'] = 38 + corpus_model['out_of_vocabulary_words'] = 38 + corpus_model['status'] = 'analyzed' + corpus_model['error'] = 'testString' + + # Construct a json representation of a Corpora model + corpora_model_json = {} + corpora_model_json['corpora'] = [corpus_model] + + # Construct a model instance of Corpora by calling from_dict on the json representation + corpora_model = Corpora.from_dict(corpora_model_json) + assert corpora_model != False + + # Construct a model instance of Corpora by calling from_dict on the json representation + corpora_model_dict = Corpora.from_dict(corpora_model_json).__dict__ + corpora_model2 = Corpora(**corpora_model_dict) + + # Verify the model instances are equivalent + assert corpora_model == corpora_model2 + + # Convert model instance back to dict and verify no loss of data + corpora_model_json2 = corpora_model.to_dict() + assert corpora_model_json2 == corpora_model_json + + +class TestModel_Corpus: + """ + Test Class for Corpus + """ + + def test_corpus_serialization(self): + """ + Test serialization/deserialization for Corpus + """ + + # Construct a json representation of a Corpus model + corpus_model_json = {} + corpus_model_json['name'] = 'testString' + corpus_model_json['total_words'] = 38 + corpus_model_json['out_of_vocabulary_words'] = 38 + corpus_model_json['status'] = 'analyzed' + corpus_model_json['error'] = 'testString' + + # Construct a model instance of Corpus by calling from_dict on the json representation + corpus_model = Corpus.from_dict(corpus_model_json) + assert corpus_model != False + + # Construct a model instance of Corpus by calling from_dict on the json representation + corpus_model_dict = Corpus.from_dict(corpus_model_json).__dict__ + corpus_model2 = Corpus(**corpus_model_dict) + + # Verify the model instances are equivalent + assert corpus_model == corpus_model2 + + # Convert model instance back to dict and verify no loss of data + corpus_model_json2 = corpus_model.to_dict() + assert corpus_model_json2 == corpus_model_json + + +class TestModel_CustomWord: + """ + Test Class for CustomWord + """ + + def test_custom_word_serialization(self): + """ + Test serialization/deserialization for CustomWord + """ + + # Construct a json representation of a CustomWord model + custom_word_model_json = {} + custom_word_model_json['word'] = 'testString' + custom_word_model_json['mapping_only'] = ['testString'] + custom_word_model_json['sounds_like'] = ['testString'] + custom_word_model_json['display_as'] = 'testString' + + # Construct a model instance of CustomWord by calling from_dict on the json representation + custom_word_model = CustomWord.from_dict(custom_word_model_json) + assert custom_word_model != False + + # Construct a model instance of CustomWord by calling from_dict on the json representation + custom_word_model_dict = CustomWord.from_dict(custom_word_model_json).__dict__ + custom_word_model2 = CustomWord(**custom_word_model_dict) + + # Verify the model instances are equivalent + assert custom_word_model == custom_word_model2 + + # Convert model instance back to dict and verify no loss of data + custom_word_model_json2 = custom_word_model.to_dict() + assert custom_word_model_json2 == custom_word_model_json + + +class TestModel_EnrichedResults: + """ + Test Class for EnrichedResults + """ + + def test_enriched_results_serialization(self): + """ + Test serialization/deserialization for EnrichedResults + """ + + # Construct dict forms of any model 
objects needed in order to build this model. + + enriched_results_transcript_timestamp_model = {} # EnrichedResultsTranscriptTimestamp + enriched_results_transcript_timestamp_model['from'] = 36.0 + enriched_results_transcript_timestamp_model['to'] = 36.0 + + enriched_results_transcript_model = {} # EnrichedResultsTranscript + enriched_results_transcript_model['text'] = 'testString' + enriched_results_transcript_model['timestamp'] = enriched_results_transcript_timestamp_model + + # Construct a json representation of a EnrichedResults model + enriched_results_model_json = {} + enriched_results_model_json['transcript'] = enriched_results_transcript_model + enriched_results_model_json['status'] = 'testString' + + # Construct a model instance of EnrichedResults by calling from_dict on the json representation + enriched_results_model = EnrichedResults.from_dict(enriched_results_model_json) + assert enriched_results_model != False + + # Construct a model instance of EnrichedResults by calling from_dict on the json representation + enriched_results_model_dict = EnrichedResults.from_dict(enriched_results_model_json).__dict__ + enriched_results_model2 = EnrichedResults(**enriched_results_model_dict) + + # Verify the model instances are equivalent + assert enriched_results_model == enriched_results_model2 + + # Convert model instance back to dict and verify no loss of data + enriched_results_model_json2 = enriched_results_model.to_dict() + assert enriched_results_model_json2 == enriched_results_model_json + + +class TestModel_EnrichedResultsTranscript: + """ + Test Class for EnrichedResultsTranscript + """ + + def test_enriched_results_transcript_serialization(self): + """ + Test serialization/deserialization for EnrichedResultsTranscript + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + enriched_results_transcript_timestamp_model = {} # EnrichedResultsTranscriptTimestamp + enriched_results_transcript_timestamp_model['from'] = 36.0 + enriched_results_transcript_timestamp_model['to'] = 36.0 + + # Construct a json representation of a EnrichedResultsTranscript model + enriched_results_transcript_model_json = {} + enriched_results_transcript_model_json['text'] = 'testString' + enriched_results_transcript_model_json['timestamp'] = enriched_results_transcript_timestamp_model + + # Construct a model instance of EnrichedResultsTranscript by calling from_dict on the json representation + enriched_results_transcript_model = EnrichedResultsTranscript.from_dict(enriched_results_transcript_model_json) + assert enriched_results_transcript_model != False + + # Construct a model instance of EnrichedResultsTranscript by calling from_dict on the json representation + enriched_results_transcript_model_dict = EnrichedResultsTranscript.from_dict(enriched_results_transcript_model_json).__dict__ + enriched_results_transcript_model2 = EnrichedResultsTranscript(**enriched_results_transcript_model_dict) + + # Verify the model instances are equivalent + assert enriched_results_transcript_model == enriched_results_transcript_model2 + + # Convert model instance back to dict and verify no loss of data + enriched_results_transcript_model_json2 = enriched_results_transcript_model.to_dict() + assert enriched_results_transcript_model_json2 == enriched_results_transcript_model_json + + +class TestModel_EnrichedResultsTranscriptTimestamp: + """ + Test Class for EnrichedResultsTranscriptTimestamp + """ + + def test_enriched_results_transcript_timestamp_serialization(self): + """ + Test serialization/deserialization for EnrichedResultsTranscriptTimestamp + """ + + # Construct a json representation of a EnrichedResultsTranscriptTimestamp model + enriched_results_transcript_timestamp_model_json = {} + enriched_results_transcript_timestamp_model_json['from'] = 36.0 + enriched_results_transcript_timestamp_model_json['to'] = 36.0 + + # Construct a model instance of EnrichedResultsTranscriptTimestamp by calling from_dict on the json representation + enriched_results_transcript_timestamp_model = EnrichedResultsTranscriptTimestamp.from_dict(enriched_results_transcript_timestamp_model_json) + assert enriched_results_transcript_timestamp_model != False + + # Construct a model instance of EnrichedResultsTranscriptTimestamp by calling from_dict on the json representation + enriched_results_transcript_timestamp_model_dict = EnrichedResultsTranscriptTimestamp.from_dict(enriched_results_transcript_timestamp_model_json).__dict__ + enriched_results_transcript_timestamp_model2 = EnrichedResultsTranscriptTimestamp(**enriched_results_transcript_timestamp_model_dict) + + # Verify the model instances are equivalent + assert enriched_results_transcript_timestamp_model == enriched_results_transcript_timestamp_model2 + + # Convert model instance back to dict and verify no loss of data + enriched_results_transcript_timestamp_model_json2 = enriched_results_transcript_timestamp_model.to_dict() + assert enriched_results_transcript_timestamp_model_json2 == enriched_results_transcript_timestamp_model_json + + +class TestModel_Grammar: + """ + Test Class for Grammar + """ + + def test_grammar_serialization(self): + """ + Test serialization/deserialization for Grammar + """ + + # Construct a json representation of a Grammar model + grammar_model_json = {} + grammar_model_json['name'] = 'testString' + 
grammar_model_json['out_of_vocabulary_words'] = 38 + grammar_model_json['status'] = 'analyzed' + grammar_model_json['error'] = 'testString' + + # Construct a model instance of Grammar by calling from_dict on the json representation + grammar_model = Grammar.from_dict(grammar_model_json) + assert grammar_model != False + + # Construct a model instance of Grammar by calling from_dict on the json representation + grammar_model_dict = Grammar.from_dict(grammar_model_json).__dict__ + grammar_model2 = Grammar(**grammar_model_dict) + + # Verify the model instances are equivalent + assert grammar_model == grammar_model2 + + # Convert model instance back to dict and verify no loss of data + grammar_model_json2 = grammar_model.to_dict() + assert grammar_model_json2 == grammar_model_json + + +class TestModel_Grammars: + """ + Test Class for Grammars + """ + + def test_grammars_serialization(self): + """ + Test serialization/deserialization for Grammars + """ + + # Construct dict forms of any model objects needed in order to build this model. + + grammar_model = {} # Grammar + grammar_model['name'] = 'testString' + grammar_model['out_of_vocabulary_words'] = 38 + grammar_model['status'] = 'analyzed' + grammar_model['error'] = 'testString' + + # Construct a json representation of a Grammars model + grammars_model_json = {} + grammars_model_json['grammars'] = [grammar_model] + + # Construct a model instance of Grammars by calling from_dict on the json representation + grammars_model = Grammars.from_dict(grammars_model_json) + assert grammars_model != False + + # Construct a model instance of Grammars by calling from_dict on the json representation + grammars_model_dict = Grammars.from_dict(grammars_model_json).__dict__ + grammars_model2 = Grammars(**grammars_model_dict) + + # Verify the model instances are equivalent + assert grammars_model == grammars_model2 + + # Convert model instance back to dict and verify no loss of data + grammars_model_json2 = grammars_model.to_dict() + assert grammars_model_json2 == grammars_model_json + + +class TestModel_KeywordResult: + """ + Test Class for KeywordResult + """ + + def test_keyword_result_serialization(self): + """ + Test serialization/deserialization for KeywordResult + """ + + # Construct a json representation of a KeywordResult model + keyword_result_model_json = {} + keyword_result_model_json['normalized_text'] = 'testString' + keyword_result_model_json['start_time'] = 72.5 + keyword_result_model_json['end_time'] = 72.5 + keyword_result_model_json['confidence'] = 0 + + # Construct a model instance of KeywordResult by calling from_dict on the json representation + keyword_result_model = KeywordResult.from_dict(keyword_result_model_json) + assert keyword_result_model != False + + # Construct a model instance of KeywordResult by calling from_dict on the json representation + keyword_result_model_dict = KeywordResult.from_dict(keyword_result_model_json).__dict__ + keyword_result_model2 = KeywordResult(**keyword_result_model_dict) + + # Verify the model instances are equivalent + assert keyword_result_model == keyword_result_model2 + + # Convert model instance back to dict and verify no loss of data + keyword_result_model_json2 = keyword_result_model.to_dict() + assert keyword_result_model_json2 == keyword_result_model_json + + +class TestModel_LanguageDetectionResult: + """ + Test Class for LanguageDetectionResult + """ + + def test_language_detection_result_serialization(self): + """ + Test serialization/deserialization for LanguageDetectionResult + """ + + # 
Construct dict forms of any model objects needed in order to build this model. + + language_info_model = {} # LanguageInfo + language_info_model['confidence'] = 36.0 + language_info_model['language'] = 'testString' + language_info_model['timestamp'] = 36.0 + + # Construct a json representation of a LanguageDetectionResult model + language_detection_result_model_json = {} + language_detection_result_model_json['language_info'] = [language_info_model] + + # Construct a model instance of LanguageDetectionResult by calling from_dict on the json representation + language_detection_result_model = LanguageDetectionResult.from_dict(language_detection_result_model_json) + assert language_detection_result_model != False + + # Construct a model instance of LanguageDetectionResult by calling from_dict on the json representation + language_detection_result_model_dict = LanguageDetectionResult.from_dict(language_detection_result_model_json).__dict__ + language_detection_result_model2 = LanguageDetectionResult(**language_detection_result_model_dict) + + # Verify the model instances are equivalent + assert language_detection_result_model == language_detection_result_model2 + + # Convert model instance back to dict and verify no loss of data + language_detection_result_model_json2 = language_detection_result_model.to_dict() + assert language_detection_result_model_json2 == language_detection_result_model_json + + +class TestModel_LanguageDetectionResults: + """ + Test Class for LanguageDetectionResults + """ + + def test_language_detection_results_serialization(self): + """ + Test serialization/deserialization for LanguageDetectionResults + """ + + # Construct dict forms of any model objects needed in order to build this model. + + language_info_model = {} # LanguageInfo + language_info_model['confidence'] = 36.0 + language_info_model['language'] = 'testString' + language_info_model['timestamp'] = 36.0 + + language_detection_result_model = {} # LanguageDetectionResult + language_detection_result_model['language_info'] = [language_info_model] + + # Construct a json representation of a LanguageDetectionResults model + language_detection_results_model_json = {} + language_detection_results_model_json['results'] = [language_detection_result_model] + language_detection_results_model_json['result_index'] = 38 + + # Construct a model instance of LanguageDetectionResults by calling from_dict on the json representation + language_detection_results_model = LanguageDetectionResults.from_dict(language_detection_results_model_json) + assert language_detection_results_model != False + + # Construct a model instance of LanguageDetectionResults by calling from_dict on the json representation + language_detection_results_model_dict = LanguageDetectionResults.from_dict(language_detection_results_model_json).__dict__ + language_detection_results_model2 = LanguageDetectionResults(**language_detection_results_model_dict) + + # Verify the model instances are equivalent + assert language_detection_results_model == language_detection_results_model2 + + # Convert model instance back to dict and verify no loss of data + language_detection_results_model_json2 = language_detection_results_model.to_dict() + assert language_detection_results_model_json2 == language_detection_results_model_json + + +class TestModel_LanguageInfo: + """ + Test Class for LanguageInfo + """ + + def test_language_info_serialization(self): + """ + Test serialization/deserialization for LanguageInfo + """ + + # Construct a json representation of a LanguageInfo 
model + language_info_model_json = {} + language_info_model_json['confidence'] = 36.0 + language_info_model_json['language'] = 'testString' + language_info_model_json['timestamp'] = 36.0 + + # Construct a model instance of LanguageInfo by calling from_dict on the json representation + language_info_model = LanguageInfo.from_dict(language_info_model_json) + assert language_info_model != False + + # Construct a model instance of LanguageInfo by calling from_dict on the json representation + language_info_model_dict = LanguageInfo.from_dict(language_info_model_json).__dict__ + language_info_model2 = LanguageInfo(**language_info_model_dict) + + # Verify the model instances are equivalent + assert language_info_model == language_info_model2 + + # Convert model instance back to dict and verify no loss of data + language_info_model_json2 = language_info_model.to_dict() + assert language_info_model_json2 == language_info_model_json + + +class TestModel_LanguageModel: + """ + Test Class for LanguageModel + """ + + def test_language_model_serialization(self): + """ + Test serialization/deserialization for LanguageModel + """ + + # Construct a json representation of a LanguageModel model + language_model_model_json = {} + language_model_model_json['customization_id'] = 'testString' + language_model_model_json['created'] = 'testString' + language_model_model_json['updated'] = 'testString' + language_model_model_json['language'] = 'testString' + language_model_model_json['dialect'] = 'testString' + language_model_model_json['versions'] = ['testString'] + language_model_model_json['owner'] = 'testString' + language_model_model_json['name'] = 'testString' + language_model_model_json['description'] = 'testString' + language_model_model_json['base_model_name'] = 'testString' + language_model_model_json['status'] = 'pending' + language_model_model_json['progress'] = 38 + language_model_model_json['error'] = 'testString' + language_model_model_json['warnings'] = 'testString' + + # Construct a model instance of LanguageModel by calling from_dict on the json representation + language_model_model = LanguageModel.from_dict(language_model_model_json) + assert language_model_model != False + + # Construct a model instance of LanguageModel by calling from_dict on the json representation + language_model_model_dict = LanguageModel.from_dict(language_model_model_json).__dict__ + language_model_model2 = LanguageModel(**language_model_model_dict) + + # Verify the model instances are equivalent + assert language_model_model == language_model_model2 + + # Convert model instance back to dict and verify no loss of data + language_model_model_json2 = language_model_model.to_dict() + assert language_model_model_json2 == language_model_model_json + + +class TestModel_LanguageModels: + """ + Test Class for LanguageModels + """ + + def test_language_models_serialization(self): + """ + Test serialization/deserialization for LanguageModels + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + language_model_model = {} # LanguageModel + language_model_model['customization_id'] = 'testString' + language_model_model['created'] = 'testString' + language_model_model['updated'] = 'testString' + language_model_model['language'] = 'testString' + language_model_model['dialect'] = 'testString' + language_model_model['versions'] = ['testString'] + language_model_model['owner'] = 'testString' + language_model_model['name'] = 'testString' + language_model_model['description'] = 'testString' + language_model_model['base_model_name'] = 'testString' + language_model_model['status'] = 'pending' + language_model_model['progress'] = 38 + language_model_model['error'] = 'testString' + language_model_model['warnings'] = 'testString' + + # Construct a json representation of a LanguageModels model + language_models_model_json = {} + language_models_model_json['customizations'] = [language_model_model] + + # Construct a model instance of LanguageModels by calling from_dict on the json representation + language_models_model = LanguageModels.from_dict(language_models_model_json) + assert language_models_model != False + + # Construct a model instance of LanguageModels by calling from_dict on the json representation + language_models_model_dict = LanguageModels.from_dict(language_models_model_json).__dict__ + language_models_model2 = LanguageModels(**language_models_model_dict) + + # Verify the model instances are equivalent + assert language_models_model == language_models_model2 + + # Convert model instance back to dict and verify no loss of data + language_models_model_json2 = language_models_model.to_dict() + assert language_models_model_json2 == language_models_model_json + + +class TestModel_ProcessedAudio: + """ + Test Class for ProcessedAudio + """ + + def test_processed_audio_serialization(self): + """ + Test serialization/deserialization for ProcessedAudio + """ + + # Construct a json representation of a ProcessedAudio model + processed_audio_model_json = {} + processed_audio_model_json['received'] = 36.0 + processed_audio_model_json['seen_by_engine'] = 36.0 + processed_audio_model_json['transcription'] = 36.0 + processed_audio_model_json['speaker_labels'] = 36.0 + + # Construct a model instance of ProcessedAudio by calling from_dict on the json representation + processed_audio_model = ProcessedAudio.from_dict(processed_audio_model_json) + assert processed_audio_model != False + + # Construct a model instance of ProcessedAudio by calling from_dict on the json representation + processed_audio_model_dict = ProcessedAudio.from_dict(processed_audio_model_json).__dict__ + processed_audio_model2 = ProcessedAudio(**processed_audio_model_dict) + + # Verify the model instances are equivalent + assert processed_audio_model == processed_audio_model2 + + # Convert model instance back to dict and verify no loss of data + processed_audio_model_json2 = processed_audio_model.to_dict() + assert processed_audio_model_json2 == processed_audio_model_json + + +class TestModel_ProcessingMetrics: + """ + Test Class for ProcessingMetrics + """ + + def test_processing_metrics_serialization(self): + """ + Test serialization/deserialization for ProcessingMetrics + """ + + # Construct dict forms of any model objects needed in order to build this model. 
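+        # The numeric values below are arbitrary placeholders; the assertions in this
+        # test only verify that deserializing and reserializing the ProcessingMetrics
+        # model preserves them without loss.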
+ + processed_audio_model = {} # ProcessedAudio + processed_audio_model['received'] = 36.0 + processed_audio_model['seen_by_engine'] = 36.0 + processed_audio_model['transcription'] = 36.0 + processed_audio_model['speaker_labels'] = 36.0 + + # Construct a json representation of a ProcessingMetrics model + processing_metrics_model_json = {} + processing_metrics_model_json['processed_audio'] = processed_audio_model + processing_metrics_model_json['wall_clock_since_first_byte_received'] = 36.0 + processing_metrics_model_json['periodic'] = True + + # Construct a model instance of ProcessingMetrics by calling from_dict on the json representation + processing_metrics_model = ProcessingMetrics.from_dict(processing_metrics_model_json) + assert processing_metrics_model != False + + # Construct a model instance of ProcessingMetrics by calling from_dict on the json representation + processing_metrics_model_dict = ProcessingMetrics.from_dict(processing_metrics_model_json).__dict__ + processing_metrics_model2 = ProcessingMetrics(**processing_metrics_model_dict) + + # Verify the model instances are equivalent + assert processing_metrics_model == processing_metrics_model2 + + # Convert model instance back to dict and verify no loss of data + processing_metrics_model_json2 = processing_metrics_model.to_dict() + assert processing_metrics_model_json2 == processing_metrics_model_json + + +class TestModel_RecognitionJob: + """ + Test Class for RecognitionJob + """ + + def test_recognition_job_serialization(self): + """ + Test serialization/deserialization for RecognitionJob + """ + + # Construct dict forms of any model objects needed in order to build this model. + + speech_recognition_alternative_model = {} # SpeechRecognitionAlternative + speech_recognition_alternative_model['transcript'] = 'testString' + speech_recognition_alternative_model['confidence'] = 0 + speech_recognition_alternative_model['timestamps'] = ['testString'] + speech_recognition_alternative_model['word_confidence'] = ['testString'] + + keyword_result_model = {} # KeywordResult + keyword_result_model['normalized_text'] = 'testString' + keyword_result_model['start_time'] = 72.5 + keyword_result_model['end_time'] = 72.5 + keyword_result_model['confidence'] = 0 + + word_alternative_result_model = {} # WordAlternativeResult + word_alternative_result_model['confidence'] = 0 + word_alternative_result_model['word'] = 'testString' + + word_alternative_results_model = {} # WordAlternativeResults + word_alternative_results_model['start_time'] = 72.5 + word_alternative_results_model['end_time'] = 72.5 + word_alternative_results_model['alternatives'] = [word_alternative_result_model] + + speech_recognition_result_model = {} # SpeechRecognitionResult + speech_recognition_result_model['final'] = True + speech_recognition_result_model['alternatives'] = [speech_recognition_alternative_model] + speech_recognition_result_model['keywords_result'] = {'key1': [keyword_result_model]} + speech_recognition_result_model['word_alternatives'] = [word_alternative_results_model] + speech_recognition_result_model['end_of_utterance'] = 'end_of_data' + + speaker_labels_result_model = {} # SpeakerLabelsResult + speaker_labels_result_model['from'] = 36.0 + speaker_labels_result_model['to'] = 36.0 + speaker_labels_result_model['speaker'] = 38 + speaker_labels_result_model['confidence'] = 36.0 + speaker_labels_result_model['final'] = True + + processed_audio_model = {} # ProcessedAudio + processed_audio_model['received'] = 36.0 + processed_audio_model['seen_by_engine'] = 
36.0 + processed_audio_model['transcription'] = 36.0 + processed_audio_model['speaker_labels'] = 36.0 + + processing_metrics_model = {} # ProcessingMetrics + processing_metrics_model['processed_audio'] = processed_audio_model + processing_metrics_model['wall_clock_since_first_byte_received'] = 36.0 + processing_metrics_model['periodic'] = True + + audio_metrics_histogram_bin_model = {} # AudioMetricsHistogramBin + audio_metrics_histogram_bin_model['begin'] = 36.0 + audio_metrics_histogram_bin_model['end'] = 36.0 + audio_metrics_histogram_bin_model['count'] = 38 + + audio_metrics_details_model = {} # AudioMetricsDetails + audio_metrics_details_model['final'] = True + audio_metrics_details_model['end_time'] = 36.0 + audio_metrics_details_model['signal_to_noise_ratio'] = 36.0 + audio_metrics_details_model['speech_ratio'] = 36.0 + audio_metrics_details_model['high_frequency_loss'] = 36.0 + audio_metrics_details_model['direct_current_offset'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['clipping_rate'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['speech_level'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['non_speech_level'] = [audio_metrics_histogram_bin_model] + + audio_metrics_model = {} # AudioMetrics + audio_metrics_model['sampling_interval'] = 36.0 + audio_metrics_model['accumulated'] = audio_metrics_details_model + + enriched_results_transcript_timestamp_model = {} # EnrichedResultsTranscriptTimestamp + enriched_results_transcript_timestamp_model['from'] = 36.0 + enriched_results_transcript_timestamp_model['to'] = 36.0 + + enriched_results_transcript_model = {} # EnrichedResultsTranscript + enriched_results_transcript_model['text'] = 'testString' + enriched_results_transcript_model['timestamp'] = enriched_results_transcript_timestamp_model + + enriched_results_model = {} # EnrichedResults + enriched_results_model['transcript'] = enriched_results_transcript_model + enriched_results_model['status'] = 'testString' + + speech_recognition_results_model = {} # SpeechRecognitionResults + speech_recognition_results_model['results'] = [speech_recognition_result_model] + speech_recognition_results_model['result_index'] = 38 + speech_recognition_results_model['speaker_labels'] = [speaker_labels_result_model] + speech_recognition_results_model['processing_metrics'] = processing_metrics_model + speech_recognition_results_model['audio_metrics'] = audio_metrics_model + speech_recognition_results_model['warnings'] = ['testString'] + speech_recognition_results_model['enriched_results'] = enriched_results_model + + # Construct a json representation of a RecognitionJob model + recognition_job_model_json = {} + recognition_job_model_json['id'] = 'testString' + recognition_job_model_json['status'] = 'waiting' + recognition_job_model_json['created'] = 'testString' + recognition_job_model_json['updated'] = 'testString' + recognition_job_model_json['url'] = 'testString' + recognition_job_model_json['user_token'] = 'testString' + recognition_job_model_json['results'] = [speech_recognition_results_model] + recognition_job_model_json['warnings'] = ['testString'] + + # Construct a model instance of RecognitionJob by calling from_dict on the json representation + recognition_job_model = RecognitionJob.from_dict(recognition_job_model_json) + assert recognition_job_model != False + + # Construct a model instance of RecognitionJob by calling from_dict on the json representation + recognition_job_model_dict = 
RecognitionJob.from_dict(recognition_job_model_json).__dict__ + recognition_job_model2 = RecognitionJob(**recognition_job_model_dict) + + # Verify the model instances are equivalent + assert recognition_job_model == recognition_job_model2 + + # Convert model instance back to dict and verify no loss of data + recognition_job_model_json2 = recognition_job_model.to_dict() + assert recognition_job_model_json2 == recognition_job_model_json + + +class TestModel_RecognitionJobs: + """ + Test Class for RecognitionJobs + """ + + def test_recognition_jobs_serialization(self): + """ + Test serialization/deserialization for RecognitionJobs + """ + + # Construct dict forms of any model objects needed in order to build this model. + + speech_recognition_alternative_model = {} # SpeechRecognitionAlternative + speech_recognition_alternative_model['transcript'] = 'testString' + speech_recognition_alternative_model['confidence'] = 0 + speech_recognition_alternative_model['timestamps'] = ['testString'] + speech_recognition_alternative_model['word_confidence'] = ['testString'] + + keyword_result_model = {} # KeywordResult + keyword_result_model['normalized_text'] = 'testString' + keyword_result_model['start_time'] = 72.5 + keyword_result_model['end_time'] = 72.5 + keyword_result_model['confidence'] = 0 + + word_alternative_result_model = {} # WordAlternativeResult + word_alternative_result_model['confidence'] = 0 + word_alternative_result_model['word'] = 'testString' + + word_alternative_results_model = {} # WordAlternativeResults + word_alternative_results_model['start_time'] = 72.5 + word_alternative_results_model['end_time'] = 72.5 + word_alternative_results_model['alternatives'] = [word_alternative_result_model] + + speech_recognition_result_model = {} # SpeechRecognitionResult + speech_recognition_result_model['final'] = True + speech_recognition_result_model['alternatives'] = [speech_recognition_alternative_model] + speech_recognition_result_model['keywords_result'] = {'key1': [keyword_result_model]} + speech_recognition_result_model['word_alternatives'] = [word_alternative_results_model] + speech_recognition_result_model['end_of_utterance'] = 'end_of_data' + + speaker_labels_result_model = {} # SpeakerLabelsResult + speaker_labels_result_model['from'] = 36.0 + speaker_labels_result_model['to'] = 36.0 + speaker_labels_result_model['speaker'] = 38 + speaker_labels_result_model['confidence'] = 36.0 + speaker_labels_result_model['final'] = True + + processed_audio_model = {} # ProcessedAudio + processed_audio_model['received'] = 36.0 + processed_audio_model['seen_by_engine'] = 36.0 + processed_audio_model['transcription'] = 36.0 + processed_audio_model['speaker_labels'] = 36.0 + + processing_metrics_model = {} # ProcessingMetrics + processing_metrics_model['processed_audio'] = processed_audio_model + processing_metrics_model['wall_clock_since_first_byte_received'] = 36.0 + processing_metrics_model['periodic'] = True + + audio_metrics_histogram_bin_model = {} # AudioMetricsHistogramBin + audio_metrics_histogram_bin_model['begin'] = 36.0 + audio_metrics_histogram_bin_model['end'] = 36.0 + audio_metrics_histogram_bin_model['count'] = 38 + + audio_metrics_details_model = {} # AudioMetricsDetails + audio_metrics_details_model['final'] = True + audio_metrics_details_model['end_time'] = 36.0 + audio_metrics_details_model['signal_to_noise_ratio'] = 36.0 + audio_metrics_details_model['speech_ratio'] = 36.0 + audio_metrics_details_model['high_frequency_loss'] = 36.0 + 
audio_metrics_details_model['direct_current_offset'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['clipping_rate'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['speech_level'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['non_speech_level'] = [audio_metrics_histogram_bin_model] + + audio_metrics_model = {} # AudioMetrics + audio_metrics_model['sampling_interval'] = 36.0 + audio_metrics_model['accumulated'] = audio_metrics_details_model + + enriched_results_transcript_timestamp_model = {} # EnrichedResultsTranscriptTimestamp + enriched_results_transcript_timestamp_model['from'] = 36.0 + enriched_results_transcript_timestamp_model['to'] = 36.0 + + enriched_results_transcript_model = {} # EnrichedResultsTranscript + enriched_results_transcript_model['text'] = 'testString' + enriched_results_transcript_model['timestamp'] = enriched_results_transcript_timestamp_model + + enriched_results_model = {} # EnrichedResults + enriched_results_model['transcript'] = enriched_results_transcript_model + enriched_results_model['status'] = 'testString' + + speech_recognition_results_model = {} # SpeechRecognitionResults + speech_recognition_results_model['results'] = [speech_recognition_result_model] + speech_recognition_results_model['result_index'] = 38 + speech_recognition_results_model['speaker_labels'] = [speaker_labels_result_model] + speech_recognition_results_model['processing_metrics'] = processing_metrics_model + speech_recognition_results_model['audio_metrics'] = audio_metrics_model + speech_recognition_results_model['warnings'] = ['testString'] + speech_recognition_results_model['enriched_results'] = enriched_results_model + + recognition_job_model = {} # RecognitionJob + recognition_job_model['id'] = 'testString' + recognition_job_model['status'] = 'waiting' + recognition_job_model['created'] = 'testString' + recognition_job_model['updated'] = 'testString' + recognition_job_model['url'] = 'testString' + recognition_job_model['user_token'] = 'testString' + recognition_job_model['results'] = [speech_recognition_results_model] + recognition_job_model['warnings'] = ['testString'] + + # Construct a json representation of a RecognitionJobs model + recognition_jobs_model_json = {} + recognition_jobs_model_json['recognitions'] = [recognition_job_model] + + # Construct a model instance of RecognitionJobs by calling from_dict on the json representation + recognition_jobs_model = RecognitionJobs.from_dict(recognition_jobs_model_json) + assert recognition_jobs_model != False + + # Construct a model instance of RecognitionJobs by calling from_dict on the json representation + recognition_jobs_model_dict = RecognitionJobs.from_dict(recognition_jobs_model_json).__dict__ + recognition_jobs_model2 = RecognitionJobs(**recognition_jobs_model_dict) + + # Verify the model instances are equivalent + assert recognition_jobs_model == recognition_jobs_model2 + + # Convert model instance back to dict and verify no loss of data + recognition_jobs_model_json2 = recognition_jobs_model.to_dict() + assert recognition_jobs_model_json2 == recognition_jobs_model_json + + +class TestModel_RegisterStatus: + """ + Test Class for RegisterStatus + """ + + def test_register_status_serialization(self): + """ + Test serialization/deserialization for RegisterStatus + """ + + # Construct a json representation of a RegisterStatus model + register_status_model_json = {} + register_status_model_json['status'] = 'created' + register_status_model_json['url'] = 'testString' + 
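+        # 'created' and the placeholder URL mirror the general shape of a callback
+        # registration response; the test below only asserts a lossless
+        # from_dict/to_dict round trip, so the specific values are immaterial.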
+ # Construct a model instance of RegisterStatus by calling from_dict on the json representation + register_status_model = RegisterStatus.from_dict(register_status_model_json) + assert register_status_model != False + + # Construct a model instance of RegisterStatus by calling from_dict on the json representation + register_status_model_dict = RegisterStatus.from_dict(register_status_model_json).__dict__ + register_status_model2 = RegisterStatus(**register_status_model_dict) + + # Verify the model instances are equivalent + assert register_status_model == register_status_model2 + + # Convert model instance back to dict and verify no loss of data + register_status_model_json2 = register_status_model.to_dict() + assert register_status_model_json2 == register_status_model_json + + +class TestModel_SpeakerLabelsResult: + """ + Test Class for SpeakerLabelsResult + """ + + def test_speaker_labels_result_serialization(self): + """ + Test serialization/deserialization for SpeakerLabelsResult + """ + + # Construct a json representation of a SpeakerLabelsResult model + speaker_labels_result_model_json = {} + speaker_labels_result_model_json['from'] = 36.0 + speaker_labels_result_model_json['to'] = 36.0 + speaker_labels_result_model_json['speaker'] = 38 + speaker_labels_result_model_json['confidence'] = 36.0 + speaker_labels_result_model_json['final'] = True + + # Construct a model instance of SpeakerLabelsResult by calling from_dict on the json representation + speaker_labels_result_model = SpeakerLabelsResult.from_dict(speaker_labels_result_model_json) + assert speaker_labels_result_model != False + + # Construct a model instance of SpeakerLabelsResult by calling from_dict on the json representation + speaker_labels_result_model_dict = SpeakerLabelsResult.from_dict(speaker_labels_result_model_json).__dict__ + speaker_labels_result_model2 = SpeakerLabelsResult(**speaker_labels_result_model_dict) + + # Verify the model instances are equivalent + assert speaker_labels_result_model == speaker_labels_result_model2 + + # Convert model instance back to dict and verify no loss of data + speaker_labels_result_model_json2 = speaker_labels_result_model.to_dict() + assert speaker_labels_result_model_json2 == speaker_labels_result_model_json + + +class TestModel_SpeechModel: + """ + Test Class for SpeechModel + """ + + def test_speech_model_serialization(self): + """ + Test serialization/deserialization for SpeechModel + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + supported_features_model = {} # SupportedFeatures + supported_features_model['custom_language_model'] = True + supported_features_model['custom_acoustic_model'] = True + supported_features_model['speaker_labels'] = True + supported_features_model['low_latency'] = True + + # Construct a json representation of a SpeechModel model + speech_model_model_json = {} + speech_model_model_json['name'] = 'testString' + speech_model_model_json['language'] = 'testString' + speech_model_model_json['rate'] = 38 + speech_model_model_json['url'] = 'testString' + speech_model_model_json['supported_features'] = supported_features_model + speech_model_model_json['description'] = 'testString' + + # Construct a model instance of SpeechModel by calling from_dict on the json representation + speech_model_model = SpeechModel.from_dict(speech_model_model_json) + assert speech_model_model != False + + # Construct a model instance of SpeechModel by calling from_dict on the json representation + speech_model_model_dict = SpeechModel.from_dict(speech_model_model_json).__dict__ + speech_model_model2 = SpeechModel(**speech_model_model_dict) + + # Verify the model instances are equivalent + assert speech_model_model == speech_model_model2 + + # Convert model instance back to dict and verify no loss of data + speech_model_model_json2 = speech_model_model.to_dict() + assert speech_model_model_json2 == speech_model_model_json + + +class TestModel_SpeechModels: + """ + Test Class for SpeechModels + """ + + def test_speech_models_serialization(self): + """ + Test serialization/deserialization for SpeechModels + """ + + # Construct dict forms of any model objects needed in order to build this model. + + supported_features_model = {} # SupportedFeatures + supported_features_model['custom_language_model'] = True + supported_features_model['custom_acoustic_model'] = True + supported_features_model['speaker_labels'] = True + supported_features_model['low_latency'] = True + + speech_model_model = {} # SpeechModel + speech_model_model['name'] = 'testString' + speech_model_model['language'] = 'testString' + speech_model_model['rate'] = 38 + speech_model_model['url'] = 'testString' + speech_model_model['supported_features'] = supported_features_model + speech_model_model['description'] = 'testString' + + # Construct a json representation of a SpeechModels model + speech_models_model_json = {} + speech_models_model_json['models'] = [speech_model_model] + + # Construct a model instance of SpeechModels by calling from_dict on the json representation + speech_models_model = SpeechModels.from_dict(speech_models_model_json) + assert speech_models_model != False + + # Construct a model instance of SpeechModels by calling from_dict on the json representation + speech_models_model_dict = SpeechModels.from_dict(speech_models_model_json).__dict__ + speech_models_model2 = SpeechModels(**speech_models_model_dict) + + # Verify the model instances are equivalent + assert speech_models_model == speech_models_model2 + + # Convert model instance back to dict and verify no loss of data + speech_models_model_json2 = speech_models_model.to_dict() + assert speech_models_model_json2 == speech_models_model_json + + +class TestModel_SpeechRecognitionAlternative: + """ + Test Class for SpeechRecognitionAlternative + """ + + def test_speech_recognition_alternative_serialization(self): + """ + Test serialization/deserialization for SpeechRecognitionAlternative + """ + + # Construct a json representation of a SpeechRecognitionAlternative model + 
speech_recognition_alternative_model_json = {} + speech_recognition_alternative_model_json['transcript'] = 'testString' + speech_recognition_alternative_model_json['confidence'] = 0 + speech_recognition_alternative_model_json['timestamps'] = ['testString'] + speech_recognition_alternative_model_json['word_confidence'] = ['testString'] + + # Construct a model instance of SpeechRecognitionAlternative by calling from_dict on the json representation + speech_recognition_alternative_model = SpeechRecognitionAlternative.from_dict(speech_recognition_alternative_model_json) + assert speech_recognition_alternative_model != False + + # Construct a model instance of SpeechRecognitionAlternative by calling from_dict on the json representation + speech_recognition_alternative_model_dict = SpeechRecognitionAlternative.from_dict(speech_recognition_alternative_model_json).__dict__ + speech_recognition_alternative_model2 = SpeechRecognitionAlternative(**speech_recognition_alternative_model_dict) + + # Verify the model instances are equivalent + assert speech_recognition_alternative_model == speech_recognition_alternative_model2 + + # Convert model instance back to dict and verify no loss of data + speech_recognition_alternative_model_json2 = speech_recognition_alternative_model.to_dict() + assert speech_recognition_alternative_model_json2 == speech_recognition_alternative_model_json + + +class TestModel_SpeechRecognitionResult: + """ + Test Class for SpeechRecognitionResult + """ + + def test_speech_recognition_result_serialization(self): + """ + Test serialization/deserialization for SpeechRecognitionResult + """ + + # Construct dict forms of any model objects needed in order to build this model. + + speech_recognition_alternative_model = {} # SpeechRecognitionAlternative + speech_recognition_alternative_model['transcript'] = 'testString' + speech_recognition_alternative_model['confidence'] = 0 + speech_recognition_alternative_model['timestamps'] = ['testString'] + speech_recognition_alternative_model['word_confidence'] = ['testString'] + + keyword_result_model = {} # KeywordResult + keyword_result_model['normalized_text'] = 'testString' + keyword_result_model['start_time'] = 72.5 + keyword_result_model['end_time'] = 72.5 + keyword_result_model['confidence'] = 0 + + word_alternative_result_model = {} # WordAlternativeResult + word_alternative_result_model['confidence'] = 0 + word_alternative_result_model['word'] = 'testString' + + word_alternative_results_model = {} # WordAlternativeResults + word_alternative_results_model['start_time'] = 72.5 + word_alternative_results_model['end_time'] = 72.5 + word_alternative_results_model['alternatives'] = [word_alternative_result_model] + + # Construct a json representation of a SpeechRecognitionResult model + speech_recognition_result_model_json = {} + speech_recognition_result_model_json['final'] = True + speech_recognition_result_model_json['alternatives'] = [speech_recognition_alternative_model] + speech_recognition_result_model_json['keywords_result'] = {'key1': [keyword_result_model]} + speech_recognition_result_model_json['word_alternatives'] = [word_alternative_results_model] + speech_recognition_result_model_json['end_of_utterance'] = 'end_of_data' + + # Construct a model instance of SpeechRecognitionResult by calling from_dict on the json representation + speech_recognition_result_model = SpeechRecognitionResult.from_dict(speech_recognition_result_model_json) + assert speech_recognition_result_model != False + + # Construct a model instance of 
SpeechRecognitionResult by calling from_dict on the json representation + speech_recognition_result_model_dict = SpeechRecognitionResult.from_dict(speech_recognition_result_model_json).__dict__ + speech_recognition_result_model2 = SpeechRecognitionResult(**speech_recognition_result_model_dict) + + # Verify the model instances are equivalent + assert speech_recognition_result_model == speech_recognition_result_model2 + + # Convert model instance back to dict and verify no loss of data + speech_recognition_result_model_json2 = speech_recognition_result_model.to_dict() + assert speech_recognition_result_model_json2 == speech_recognition_result_model_json + + +class TestModel_SpeechRecognitionResults: + """ + Test Class for SpeechRecognitionResults + """ + + def test_speech_recognition_results_serialization(self): + """ + Test serialization/deserialization for SpeechRecognitionResults + """ + + # Construct dict forms of any model objects needed in order to build this model. + + speech_recognition_alternative_model = {} # SpeechRecognitionAlternative + speech_recognition_alternative_model['transcript'] = 'testString' + speech_recognition_alternative_model['confidence'] = 0 + speech_recognition_alternative_model['timestamps'] = ['testString'] + speech_recognition_alternative_model['word_confidence'] = ['testString'] + + keyword_result_model = {} # KeywordResult + keyword_result_model['normalized_text'] = 'testString' + keyword_result_model['start_time'] = 72.5 + keyword_result_model['end_time'] = 72.5 + keyword_result_model['confidence'] = 0 + + word_alternative_result_model = {} # WordAlternativeResult + word_alternative_result_model['confidence'] = 0 + word_alternative_result_model['word'] = 'testString' + + word_alternative_results_model = {} # WordAlternativeResults + word_alternative_results_model['start_time'] = 72.5 + word_alternative_results_model['end_time'] = 72.5 + word_alternative_results_model['alternatives'] = [word_alternative_result_model] + + speech_recognition_result_model = {} # SpeechRecognitionResult + speech_recognition_result_model['final'] = True + speech_recognition_result_model['alternatives'] = [speech_recognition_alternative_model] + speech_recognition_result_model['keywords_result'] = {'key1': [keyword_result_model]} + speech_recognition_result_model['word_alternatives'] = [word_alternative_results_model] + speech_recognition_result_model['end_of_utterance'] = 'end_of_data' + + speaker_labels_result_model = {} # SpeakerLabelsResult + speaker_labels_result_model['from'] = 36.0 + speaker_labels_result_model['to'] = 36.0 + speaker_labels_result_model['speaker'] = 38 + speaker_labels_result_model['confidence'] = 36.0 + speaker_labels_result_model['final'] = True + + processed_audio_model = {} # ProcessedAudio + processed_audio_model['received'] = 36.0 + processed_audio_model['seen_by_engine'] = 36.0 + processed_audio_model['transcription'] = 36.0 + processed_audio_model['speaker_labels'] = 36.0 + + processing_metrics_model = {} # ProcessingMetrics + processing_metrics_model['processed_audio'] = processed_audio_model + processing_metrics_model['wall_clock_since_first_byte_received'] = 36.0 + processing_metrics_model['periodic'] = True + + audio_metrics_histogram_bin_model = {} # AudioMetricsHistogramBin + audio_metrics_histogram_bin_model['begin'] = 36.0 + audio_metrics_histogram_bin_model['end'] = 36.0 + audio_metrics_histogram_bin_model['count'] = 38 + + audio_metrics_details_model = {} # AudioMetricsDetails + audio_metrics_details_model['final'] = True + 
audio_metrics_details_model['end_time'] = 36.0 + audio_metrics_details_model['signal_to_noise_ratio'] = 36.0 + audio_metrics_details_model['speech_ratio'] = 36.0 + audio_metrics_details_model['high_frequency_loss'] = 36.0 + audio_metrics_details_model['direct_current_offset'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['clipping_rate'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['speech_level'] = [audio_metrics_histogram_bin_model] + audio_metrics_details_model['non_speech_level'] = [audio_metrics_histogram_bin_model] + + audio_metrics_model = {} # AudioMetrics + audio_metrics_model['sampling_interval'] = 36.0 + audio_metrics_model['accumulated'] = audio_metrics_details_model + + enriched_results_transcript_timestamp_model = {} # EnrichedResultsTranscriptTimestamp + enriched_results_transcript_timestamp_model['from'] = 36.0 + enriched_results_transcript_timestamp_model['to'] = 36.0 + + enriched_results_transcript_model = {} # EnrichedResultsTranscript + enriched_results_transcript_model['text'] = 'testString' + enriched_results_transcript_model['timestamp'] = enriched_results_transcript_timestamp_model + + enriched_results_model = {} # EnrichedResults + enriched_results_model['transcript'] = enriched_results_transcript_model + enriched_results_model['status'] = 'testString' + + # Construct a json representation of a SpeechRecognitionResults model + speech_recognition_results_model_json = {} + speech_recognition_results_model_json['results'] = [speech_recognition_result_model] + speech_recognition_results_model_json['result_index'] = 38 + speech_recognition_results_model_json['speaker_labels'] = [speaker_labels_result_model] + speech_recognition_results_model_json['processing_metrics'] = processing_metrics_model + speech_recognition_results_model_json['audio_metrics'] = audio_metrics_model + speech_recognition_results_model_json['warnings'] = ['testString'] + speech_recognition_results_model_json['enriched_results'] = enriched_results_model + + # Construct a model instance of SpeechRecognitionResults by calling from_dict on the json representation + speech_recognition_results_model = SpeechRecognitionResults.from_dict(speech_recognition_results_model_json) + assert speech_recognition_results_model != False + + # Construct a model instance of SpeechRecognitionResults by calling from_dict on the json representation + speech_recognition_results_model_dict = SpeechRecognitionResults.from_dict(speech_recognition_results_model_json).__dict__ + speech_recognition_results_model2 = SpeechRecognitionResults(**speech_recognition_results_model_dict) + + # Verify the model instances are equivalent + assert speech_recognition_results_model == speech_recognition_results_model2 + + # Convert model instance back to dict and verify no loss of data + speech_recognition_results_model_json2 = speech_recognition_results_model.to_dict() + assert speech_recognition_results_model_json2 == speech_recognition_results_model_json + + +class TestModel_SupportedFeatures: + """ + Test Class for SupportedFeatures + """ + + def test_supported_features_serialization(self): + """ + Test serialization/deserialization for SupportedFeatures + """ + + # Construct a json representation of a SupportedFeatures model + supported_features_model_json = {} + supported_features_model_json['custom_language_model'] = True + supported_features_model_json['custom_acoustic_model'] = True + supported_features_model_json['speaker_labels'] = True + supported_features_model_json['low_latency'] = 
True + + # Construct a model instance of SupportedFeatures by calling from_dict on the json representation + supported_features_model = SupportedFeatures.from_dict(supported_features_model_json) + assert supported_features_model != False + + # Construct a model instance of SupportedFeatures by calling from_dict on the json representation + supported_features_model_dict = SupportedFeatures.from_dict(supported_features_model_json).__dict__ + supported_features_model2 = SupportedFeatures(**supported_features_model_dict) + + # Verify the model instances are equivalent + assert supported_features_model == supported_features_model2 + + # Convert model instance back to dict and verify no loss of data + supported_features_model_json2 = supported_features_model.to_dict() + assert supported_features_model_json2 == supported_features_model_json + + +class TestModel_TrainingResponse: + """ + Test Class for TrainingResponse + """ + + def test_training_response_serialization(self): + """ + Test serialization/deserialization for TrainingResponse + """ + + # Construct dict forms of any model objects needed in order to build this model. + + training_warning_model = {} # TrainingWarning + training_warning_model['code'] = 'invalid_audio_files' + training_warning_model['message'] = 'testString' + + # Construct a json representation of a TrainingResponse model + training_response_model_json = {} + training_response_model_json['warnings'] = [training_warning_model] + + # Construct a model instance of TrainingResponse by calling from_dict on the json representation + training_response_model = TrainingResponse.from_dict(training_response_model_json) + assert training_response_model != False + + # Construct a model instance of TrainingResponse by calling from_dict on the json representation + training_response_model_dict = TrainingResponse.from_dict(training_response_model_json).__dict__ + training_response_model2 = TrainingResponse(**training_response_model_dict) + + # Verify the model instances are equivalent + assert training_response_model == training_response_model2 + + # Convert model instance back to dict and verify no loss of data + training_response_model_json2 = training_response_model.to_dict() + assert training_response_model_json2 == training_response_model_json + + +class TestModel_TrainingWarning: + """ + Test Class for TrainingWarning + """ + + def test_training_warning_serialization(self): + """ + Test serialization/deserialization for TrainingWarning + """ + + # Construct a json representation of a TrainingWarning model + training_warning_model_json = {} + training_warning_model_json['code'] = 'invalid_audio_files' + training_warning_model_json['message'] = 'testString' + + # Construct a model instance of TrainingWarning by calling from_dict on the json representation + training_warning_model = TrainingWarning.from_dict(training_warning_model_json) + assert training_warning_model != False + + # Construct a model instance of TrainingWarning by calling from_dict on the json representation + training_warning_model_dict = TrainingWarning.from_dict(training_warning_model_json).__dict__ + training_warning_model2 = TrainingWarning(**training_warning_model_dict) + + # Verify the model instances are equivalent + assert training_warning_model == training_warning_model2 + + # Convert model instance back to dict and verify no loss of data + training_warning_model_json2 = training_warning_model.to_dict() + assert training_warning_model_json2 == training_warning_model_json + + +class TestModel_Word: + """ + 
Test Class for Word + """ + + def test_word_serialization(self): + """ + Test serialization/deserialization for Word + """ + + # Construct dict forms of any model objects needed in order to build this model. + + word_error_model = {} # WordError + word_error_model['element'] = 'testString' + + # Construct a json representation of a Word model + word_model_json = {} + word_model_json['word'] = 'testString' + word_model_json['mapping_only'] = ['testString'] + word_model_json['sounds_like'] = ['testString'] + word_model_json['display_as'] = 'testString' + word_model_json['count'] = 38 + word_model_json['source'] = ['testString'] + word_model_json['error'] = [word_error_model] + + # Construct a model instance of Word by calling from_dict on the json representation + word_model = Word.from_dict(word_model_json) + assert word_model != False + + # Construct a model instance of Word by calling from_dict on the json representation + word_model_dict = Word.from_dict(word_model_json).__dict__ + word_model2 = Word(**word_model_dict) + + # Verify the model instances are equivalent + assert word_model == word_model2 + + # Convert model instance back to dict and verify no loss of data + word_model_json2 = word_model.to_dict() + assert word_model_json2 == word_model_json + + +class TestModel_WordAlternativeResult: + """ + Test Class for WordAlternativeResult + """ + + def test_word_alternative_result_serialization(self): + """ + Test serialization/deserialization for WordAlternativeResult + """ + + # Construct a json representation of a WordAlternativeResult model + word_alternative_result_model_json = {} + word_alternative_result_model_json['confidence'] = 0 + word_alternative_result_model_json['word'] = 'testString' + + # Construct a model instance of WordAlternativeResult by calling from_dict on the json representation + word_alternative_result_model = WordAlternativeResult.from_dict(word_alternative_result_model_json) + assert word_alternative_result_model != False + + # Construct a model instance of WordAlternativeResult by calling from_dict on the json representation + word_alternative_result_model_dict = WordAlternativeResult.from_dict(word_alternative_result_model_json).__dict__ + word_alternative_result_model2 = WordAlternativeResult(**word_alternative_result_model_dict) + + # Verify the model instances are equivalent + assert word_alternative_result_model == word_alternative_result_model2 + + # Convert model instance back to dict and verify no loss of data + word_alternative_result_model_json2 = word_alternative_result_model.to_dict() + assert word_alternative_result_model_json2 == word_alternative_result_model_json + + +class TestModel_WordAlternativeResults: + """ + Test Class for WordAlternativeResults + """ + + def test_word_alternative_results_serialization(self): + """ + Test serialization/deserialization for WordAlternativeResults + """ + + # Construct dict forms of any model objects needed in order to build this model. 
+ + word_alternative_result_model = {} # WordAlternativeResult + word_alternative_result_model['confidence'] = 0 + word_alternative_result_model['word'] = 'testString' + + # Construct a json representation of a WordAlternativeResults model + word_alternative_results_model_json = {} + word_alternative_results_model_json['start_time'] = 72.5 + word_alternative_results_model_json['end_time'] = 72.5 + word_alternative_results_model_json['alternatives'] = [word_alternative_result_model] + + # Construct a model instance of WordAlternativeResults by calling from_dict on the json representation + word_alternative_results_model = WordAlternativeResults.from_dict(word_alternative_results_model_json) + assert word_alternative_results_model != False + + # Construct a model instance of WordAlternativeResults by calling from_dict on the json representation + word_alternative_results_model_dict = WordAlternativeResults.from_dict(word_alternative_results_model_json).__dict__ + word_alternative_results_model2 = WordAlternativeResults(**word_alternative_results_model_dict) + + # Verify the model instances are equivalent + assert word_alternative_results_model == word_alternative_results_model2 + + # Convert model instance back to dict and verify no loss of data + word_alternative_results_model_json2 = word_alternative_results_model.to_dict() + assert word_alternative_results_model_json2 == word_alternative_results_model_json + + +class TestModel_WordError: + """ + Test Class for WordError + """ + + def test_word_error_serialization(self): + """ + Test serialization/deserialization for WordError + """ + + # Construct a json representation of a WordError model + word_error_model_json = {} + word_error_model_json['element'] = 'testString' + + # Construct a model instance of WordError by calling from_dict on the json representation + word_error_model = WordError.from_dict(word_error_model_json) + assert word_error_model != False + + # Construct a model instance of WordError by calling from_dict on the json representation + word_error_model_dict = WordError.from_dict(word_error_model_json).__dict__ + word_error_model2 = WordError(**word_error_model_dict) + + # Verify the model instances are equivalent + assert word_error_model == word_error_model2 + + # Convert model instance back to dict and verify no loss of data + word_error_model_json2 = word_error_model.to_dict() + assert word_error_model_json2 == word_error_model_json + + +class TestModel_Words: + """ + Test Class for Words + """ + + def test_words_serialization(self): + """ + Test serialization/deserialization for Words + """ + + # Construct dict forms of any model objects needed in order to build this model. 
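+        # The 'error' field is populated with a single WordError dict so that the
+        # nested structure is also exercised by the round-trip checks below.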
+ + word_error_model = {} # WordError + word_error_model['element'] = 'testString' + + word_model = {} # Word + word_model['word'] = 'testString' + word_model['mapping_only'] = ['testString'] + word_model['sounds_like'] = ['testString'] + word_model['display_as'] = 'testString' + word_model['count'] = 38 + word_model['source'] = ['testString'] + word_model['error'] = [word_error_model] + + # Construct a json representation of a Words model + words_model_json = {} + words_model_json['words'] = [word_model] + + # Construct a model instance of Words by calling from_dict on the json representation + words_model = Words.from_dict(words_model_json) + assert words_model != False + + # Construct a model instance of Words by calling from_dict on the json representation + words_model_dict = Words.from_dict(words_model_json).__dict__ + words_model2 = Words(**words_model_dict) + + # Verify the model instances are equivalent + assert words_model == words_model2 + + # Convert model instance back to dict and verify no loss of data + words_model_json2 = words_model.to_dict() + assert words_model_json2 == words_model_json + + +# endregion +############################################################################## +# End of Model Tests +############################################################################## diff --git a/test/unit/test_text_to_speech_v1.py b/test/unit/test_text_to_speech_v1.py index c18f5e629..e151e4503 100644 --- a/test/unit/test_text_to_speech_v1.py +++ b/test/unit/test_text_to_speech_v1.py @@ -1,245 +1,2889 @@ -# coding=utf-8 -import responses -import ibm_watson +# -*- coding: utf-8 -*- +# (C) Copyright IBM Corp. 2015, 2024. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Unit Tests for TextToSpeechV1 +""" + +from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator +import inspect +import io import json +import pytest +import re +import requests +import responses +import tempfile +import urllib +from ibm_watson.text_to_speech_v1 import * + + +_service = TextToSpeechV1( + authenticator=NoAuthAuthenticator() +) + +_base_url = 'https://api.us-south.text-to-speech.watson.cloud.ibm.com' +_service.set_service_url(_base_url) + + +def preprocess_url(operation_path: str): + """ + Returns the request url associated with the specified operation path. + This will be base_url concatenated with a quoted version of operation_path. + The returned request URL is used to register the mock response so it needs + to match the request URL that is formed by the requests library. + """ + + # Form the request URL from the base URL and operation path. + request_url = _base_url + operation_path + + # If the request url does NOT end with a /, then just return it as-is. + # Otherwise, return a regular expression that matches one or more trailing /. 
+ if not request_url.endswith('/'): + return request_url + return re.compile(request_url.rstrip('/') + '/+') + + +############################################################################## +# Start of Service: Voices +############################################################################## +# region + + +class TestListVoices: + """ + Test Class for list_voices + """ + + @responses.activate + def test_list_voices_all_params(self): + """ + list_voices() + """ + # Set up mock + url = preprocess_url('/v1/voices') + mock_response = '{"voices": [{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_voices() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_voices_all_params_with_retries(self): + # Enable retries and run test_list_voices_all_params. + _service.enable_retries() + self.test_list_voices_all_params() + + # Disable retries and run test_list_voices_all_params. + _service.disable_retries() + self.test_list_voices_all_params() + + +class TestGetVoice: + """ + Test Class for get_voice + """ + + @responses.activate + def test_get_voice_all_params(self): + """ + get_voice() + """ + # Set up mock + url = preprocess_url('/v1/voices/de-DE_BirgitV3Voice') + mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + voice = 'de-DE_BirgitV3Voice' + customization_id = 'testString' + + # Invoke method + response = _service.get_voice( + voice, + customization_id=customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'customization_id={}'.format(customization_id) in query_string + + def test_get_voice_all_params_with_retries(self): + # Enable retries and run test_get_voice_all_params. + _service.enable_retries() + self.test_get_voice_all_params() + + # Disable retries and run test_get_voice_all_params. 
+ _service.disable_retries() + self.test_get_voice_all_params() + + @responses.activate + def test_get_voice_required_params(self): + """ + test_get_voice_required_params() + """ + # Set up mock + url = preprocess_url('/v1/voices/de-DE_BirgitV3Voice') + mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + voice = 'de-DE_BirgitV3Voice' + + # Invoke method + response = _service.get_voice( + voice, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_voice_required_params_with_retries(self): + # Enable retries and run test_get_voice_required_params. + _service.enable_retries() + self.test_get_voice_required_params() + + # Disable retries and run test_get_voice_required_params. + _service.disable_retries() + self.test_get_voice_required_params() + + @responses.activate + def test_get_voice_value_error(self): + """ + test_get_voice_value_error() + """ + # Set up mock + url = preprocess_url('/v1/voices/de-DE_BirgitV3Voice') + mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + voice = 'de-DE_BirgitV3Voice' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "voice": voice, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_voice(**req_copy) + + def test_get_voice_value_error_with_retries(self): + # Enable retries and run test_get_voice_value_error. + _service.enable_retries() + self.test_get_voice_value_error() + + # Disable retries and run test_get_voice_value_error. 
+ _service.disable_retries() + self.test_get_voice_value_error() + + +# endregion +############################################################################## +# End of Service: Voices +############################################################################## + +############################################################################## +# Start of Service: Synthesis +############################################################################## +# region + + +class TestSynthesize: + """ + Test Class for synthesize + """ + + @responses.activate + def test_synthesize_all_params(self): + """ + synthesize() + """ + # Set up mock + url = preprocess_url('/v1/synthesize') + mock_response = 'This is a mock binary response.' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='audio/alaw', + status=200, + ) + + # Set up parameter values + text = 'testString' + accept = 'audio/ogg;codecs=opus' + voice = 'en-US_MichaelV3Voice' + customization_id = 'testString' + spell_out_mode = 'default' + rate_percentage = 0 + pitch_percentage = 0 + + # Invoke method + response = _service.synthesize( + text, + accept=accept, + voice=voice, + customization_id=customization_id, + spell_out_mode=spell_out_mode, + rate_percentage=rate_percentage, + pitch_percentage=pitch_percentage, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'voice={}'.format(voice) in query_string + assert 'customization_id={}'.format(customization_id) in query_string + assert 'spell_out_mode={}'.format(spell_out_mode) in query_string + assert 'rate_percentage={}'.format(rate_percentage) in query_string + assert 'pitch_percentage={}'.format(pitch_percentage) in query_string + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['text'] == 'testString' + + def test_synthesize_all_params_with_retries(self): + # Enable retries and run test_synthesize_all_params. + _service.enable_retries() + self.test_synthesize_all_params() + + # Disable retries and run test_synthesize_all_params. + _service.disable_retries() + self.test_synthesize_all_params() + + @responses.activate + def test_synthesize_required_params(self): + """ + test_synthesize_required_params() + """ + # Set up mock + url = preprocess_url('/v1/synthesize') + mock_response = 'This is a mock binary response.' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='audio/alaw', + status=200, + ) + + # Set up parameter values + text = 'testString' + + # Invoke method + response = _service.synthesize( + text, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['text'] == 'testString' + + def test_synthesize_required_params_with_retries(self): + # Enable retries and run test_synthesize_required_params. + _service.enable_retries() + self.test_synthesize_required_params() + + # Disable retries and run test_synthesize_required_params. 
+ _service.disable_retries() + self.test_synthesize_required_params() + + @responses.activate + def test_synthesize_value_error(self): + """ + test_synthesize_value_error() + """ + # Set up mock + url = preprocess_url('/v1/synthesize') + mock_response = 'This is a mock binary response.' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='audio/alaw', + status=200, + ) + + # Set up parameter values + text = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "text": text, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.synthesize(**req_copy) + + def test_synthesize_value_error_with_retries(self): + # Enable retries and run test_synthesize_value_error. + _service.enable_retries() + self.test_synthesize_value_error() + + # Disable retries and run test_synthesize_value_error. + _service.disable_retries() + self.test_synthesize_value_error() + + +# endregion +############################################################################## +# End of Service: Synthesis +############################################################################## + +############################################################################## +# Start of Service: Pronunciation +############################################################################## +# region + + +class TestGetPronunciation: + """ + Test Class for get_pronunciation + """ + + @responses.activate + def test_get_pronunciation_all_params(self): + """ + get_pronunciation() + """ + # Set up mock + url = preprocess_url('/v1/pronunciation') + mock_response = '{"pronunciation": "pronunciation"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + text = 'testString' + voice = 'en-US_MichaelV3Voice' + format = 'ipa' + customization_id = 'testString' + + # Invoke method + response = _service.get_pronunciation( + text, + voice=voice, + format=format, + customization_id=customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'text={}'.format(text) in query_string + assert 'voice={}'.format(voice) in query_string + assert 'format={}'.format(format) in query_string + assert 'customization_id={}'.format(customization_id) in query_string + + def test_get_pronunciation_all_params_with_retries(self): + # Enable retries and run test_get_pronunciation_all_params. + _service.enable_retries() + self.test_get_pronunciation_all_params() + + # Disable retries and run test_get_pronunciation_all_params. 
+ _service.disable_retries() + self.test_get_pronunciation_all_params() + + @responses.activate + def test_get_pronunciation_required_params(self): + """ + test_get_pronunciation_required_params() + """ + # Set up mock + url = preprocess_url('/v1/pronunciation') + mock_response = '{"pronunciation": "pronunciation"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + text = 'testString' + + # Invoke method + response = _service.get_pronunciation( + text, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'text={}'.format(text) in query_string + + def test_get_pronunciation_required_params_with_retries(self): + # Enable retries and run test_get_pronunciation_required_params. + _service.enable_retries() + self.test_get_pronunciation_required_params() + + # Disable retries and run test_get_pronunciation_required_params. + _service.disable_retries() + self.test_get_pronunciation_required_params() + + @responses.activate + def test_get_pronunciation_value_error(self): + """ + test_get_pronunciation_value_error() + """ + # Set up mock + url = preprocess_url('/v1/pronunciation') + mock_response = '{"pronunciation": "pronunciation"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + text = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "text": text, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_pronunciation(**req_copy) + + def test_get_pronunciation_value_error_with_retries(self): + # Enable retries and run test_get_pronunciation_value_error. + _service.enable_retries() + self.test_get_pronunciation_value_error() + + # Disable retries and run test_get_pronunciation_value_error. 
+ _service.disable_retries() + self.test_get_pronunciation_value_error() + + +# endregion +############################################################################## +# End of Service: Pronunciation +############################################################################## + +############################################################################## +# Start of Service: CustomModels +############################################################################## +# region + + +class TestCreateCustomModel: + """ + Test Class for create_custom_model + """ + + @responses.activate + def test_create_custom_model_all_params(self): + """ + create_custom_model() + """ + # Set up mock + url = preprocess_url('/v1/customizations') + mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + name = 'testString' + language = 'en-US' + description = 'testString' + + # Invoke method + response = _service.create_custom_model( + name, + language=language, + description=description, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['language'] == 'en-US' + assert req_body['description'] == 'testString' + + def test_create_custom_model_all_params_with_retries(self): + # Enable retries and run test_create_custom_model_all_params. + _service.enable_retries() + self.test_create_custom_model_all_params() + + # Disable retries and run test_create_custom_model_all_params. + _service.disable_retries() + self.test_create_custom_model_all_params() + + @responses.activate + def test_create_custom_model_value_error(self): + """ + test_create_custom_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations') + mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + name = 'testString' + language = 'en-US' + description = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "name": name, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_custom_model(**req_copy) + + def test_create_custom_model_value_error_with_retries(self): + # Enable retries and run test_create_custom_model_value_error. 
+ _service.enable_retries() + self.test_create_custom_model_value_error() + + # Disable retries and run test_create_custom_model_value_error. + _service.disable_retries() + self.test_create_custom_model_value_error() + + +class TestListCustomModels: + """ + Test Class for list_custom_models + """ + + @responses.activate + def test_list_custom_models_all_params(self): + """ + list_custom_models() + """ + # Set up mock + url = preprocess_url('/v1/customizations') + mock_response = '{"customizations": [{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + language = 'de-DE' + + # Invoke method + response = _service.list_custom_models( + language=language, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'language={}'.format(language) in query_string + + def test_list_custom_models_all_params_with_retries(self): + # Enable retries and run test_list_custom_models_all_params. + _service.enable_retries() + self.test_list_custom_models_all_params() + + # Disable retries and run test_list_custom_models_all_params. + _service.disable_retries() + self.test_list_custom_models_all_params() + + @responses.activate + def test_list_custom_models_required_params(self): + """ + test_list_custom_models_required_params() + """ + # Set up mock + url = preprocess_url('/v1/customizations') + mock_response = '{"customizations": [{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_custom_models() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_custom_models_required_params_with_retries(self): + # Enable retries and run test_list_custom_models_required_params. + _service.enable_retries() + self.test_list_custom_models_required_params() + + # Disable retries and run test_list_custom_models_required_params. 
+ _service.disable_retries() + self.test_list_custom_models_required_params() + + +class TestUpdateCustomModel: + """ + Test Class for update_custom_model + """ + + @responses.activate + def test_update_custom_model_all_params(self): + """ + update_custom_model() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString') + responses.add( + responses.POST, + url, + status=200, + ) + + # Construct a dict representation of a Word model + word_model = {} + word_model['word'] = 'testString' + word_model['translation'] = 'testString' + word_model['part_of_speech'] = 'Dosi' + + # Set up parameter values + customization_id = 'testString' + name = 'testString' + description = 'testString' + words = [word_model] + + # Invoke method + response = _service.update_custom_model( + customization_id, + name=name, + description=description, + words=words, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['name'] == 'testString' + assert req_body['description'] == 'testString' + assert req_body['words'] == [word_model] + + def test_update_custom_model_all_params_with_retries(self): + # Enable retries and run test_update_custom_model_all_params. + _service.enable_retries() + self.test_update_custom_model_all_params() + + # Disable retries and run test_update_custom_model_all_params. + _service.disable_retries() + self.test_update_custom_model_all_params() + + @responses.activate + def test_update_custom_model_value_error(self): + """ + test_update_custom_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString') + responses.add( + responses.POST, + url, + status=200, + ) + + # Construct a dict representation of a Word model + word_model = {} + word_model['word'] = 'testString' + word_model['translation'] = 'testString' + word_model['part_of_speech'] = 'Dosi' + + # Set up parameter values + customization_id = 'testString' + name = 'testString' + description = 'testString' + words = [word_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.update_custom_model(**req_copy) + + def test_update_custom_model_value_error_with_retries(self): + # Enable retries and run test_update_custom_model_value_error. + _service.enable_retries() + self.test_update_custom_model_value_error() + + # Disable retries and run test_update_custom_model_value_error. 
+ _service.disable_retries() + self.test_update_custom_model_value_error() + + +class TestGetCustomModel: + """ + Test Class for get_custom_model + """ + + @responses.activate + def test_get_custom_model_all_params(self): + """ + get_custom_model() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString') + mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.get_custom_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_custom_model_all_params_with_retries(self): + # Enable retries and run test_get_custom_model_all_params. + _service.enable_retries() + self.test_get_custom_model_all_params() + + # Disable retries and run test_get_custom_model_all_params. + _service.disable_retries() + self.test_get_custom_model_all_params() + + @responses.activate + def test_get_custom_model_value_error(self): + """ + test_get_custom_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString') + mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_custom_model(**req_copy) + + def test_get_custom_model_value_error_with_retries(self): + # Enable retries and run test_get_custom_model_value_error. + _service.enable_retries() + self.test_get_custom_model_value_error() + + # Disable retries and run test_get_custom_model_value_error. 
+ _service.disable_retries() + self.test_get_custom_model_value_error() + + +class TestDeleteCustomModel: + """ + Test Class for delete_custom_model + """ + + @responses.activate + def test_delete_custom_model_all_params(self): + """ + delete_custom_model() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.delete_custom_model( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_custom_model_all_params_with_retries(self): + # Enable retries and run test_delete_custom_model_all_params. + _service.enable_retries() + self.test_delete_custom_model_all_params() + + # Disable retries and run test_delete_custom_model_all_params. + _service.disable_retries() + self.test_delete_custom_model_all_params() + + @responses.activate + def test_delete_custom_model_value_error(self): + """ + test_delete_custom_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_custom_model(**req_copy) + + def test_delete_custom_model_value_error_with_retries(self): + # Enable retries and run test_delete_custom_model_value_error. + _service.enable_retries() + self.test_delete_custom_model_value_error() + + # Disable retries and run test_delete_custom_model_value_error. + _service.disable_retries() + self.test_delete_custom_model_value_error() + + +# endregion +############################################################################## +# End of Service: CustomModels +############################################################################## + +############################################################################## +# Start of Service: CustomWords +############################################################################## +# region + + +class TestAddWords: + """ + Test Class for add_words + """ + + @responses.activate + def test_add_words_all_params(self): + """ + add_words() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words') + responses.add( + responses.POST, + url, + status=200, + ) + + # Construct a dict representation of a Word model + word_model = {} + word_model['word'] = 'testString' + word_model['translation'] = 'testString' + word_model['part_of_speech'] = 'Dosi' + + # Set up parameter values + customization_id = 'testString' + words = [word_model] + + # Invoke method + response = _service.add_words( + customization_id, + words, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['words'] == [word_model] + + def test_add_words_all_params_with_retries(self): + # Enable retries and run test_add_words_all_params. 
+ _service.enable_retries() + self.test_add_words_all_params() + + # Disable retries and run test_add_words_all_params. + _service.disable_retries() + self.test_add_words_all_params() + + @responses.activate + def test_add_words_value_error(self): + """ + test_add_words_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words') + responses.add( + responses.POST, + url, + status=200, + ) + + # Construct a dict representation of a Word model + word_model = {} + word_model['word'] = 'testString' + word_model['translation'] = 'testString' + word_model['part_of_speech'] = 'Dosi' + + # Set up parameter values + customization_id = 'testString' + words = [word_model] + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "words": words, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.add_words(**req_copy) + + def test_add_words_value_error_with_retries(self): + # Enable retries and run test_add_words_value_error. + _service.enable_retries() + self.test_add_words_value_error() + + # Disable retries and run test_add_words_value_error. + _service.disable_retries() + self.test_add_words_value_error() + + +class TestListWords: + """ + Test Class for list_words + """ + + @responses.activate + def test_list_words_all_params(self): + """ + list_words() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words') + mock_response = '{"words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.list_words( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_words_all_params_with_retries(self): + # Enable retries and run test_list_words_all_params. + _service.enable_retries() + self.test_list_words_all_params() + + # Disable retries and run test_list_words_all_params. + _service.disable_retries() + self.test_list_words_all_params() + + @responses.activate + def test_list_words_value_error(self): + """ + test_list_words_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words') + mock_response = '{"words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_words(**req_copy) + + def test_list_words_value_error_with_retries(self): + # Enable retries and run test_list_words_value_error. + _service.enable_retries() + self.test_list_words_value_error() + + # Disable retries and run test_list_words_value_error. 
+ _service.disable_retries() + self.test_list_words_value_error() + + +class TestAddWord: + """ + Test Class for add_word + """ + + @responses.activate + def test_add_word_all_params(self): + """ + add_word() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + responses.add( + responses.PUT, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + word = 'testString' + translation = 'testString' + part_of_speech = 'Dosi' + + # Invoke method + response = _service.add_word( + customization_id, + word, + translation, + part_of_speech=part_of_speech, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate body params + req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) + assert req_body['translation'] == 'testString' + assert req_body['part_of_speech'] == 'Dosi' + + def test_add_word_all_params_with_retries(self): + # Enable retries and run test_add_word_all_params. + _service.enable_retries() + self.test_add_word_all_params() + + # Disable retries and run test_add_word_all_params. + _service.disable_retries() + self.test_add_word_all_params() + + @responses.activate + def test_add_word_value_error(self): + """ + test_add_word_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + responses.add( + responses.PUT, + url, + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + word = 'testString' + translation = 'testString' + part_of_speech = 'Dosi' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "word": word, + "translation": translation, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.add_word(**req_copy) + + def test_add_word_value_error_with_retries(self): + # Enable retries and run test_add_word_value_error. + _service.enable_retries() + self.test_add_word_value_error() + + # Disable retries and run test_add_word_value_error. + _service.disable_retries() + self.test_add_word_value_error() + + +class TestGetWord: + """ + Test Class for get_word + """ + + @responses.activate + def test_get_word_all_params(self): + """ + get_word() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + mock_response = '{"translation": "translation", "part_of_speech": "Dosi"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + word = 'testString' + + # Invoke method + response = _service.get_word( + customization_id, + word, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_word_all_params_with_retries(self): + # Enable retries and run test_get_word_all_params. + _service.enable_retries() + self.test_get_word_all_params() + + # Disable retries and run test_get_word_all_params. 
+ _service.disable_retries() + self.test_get_word_all_params() + + @responses.activate + def test_get_word_value_error(self): + """ + test_get_word_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + mock_response = '{"translation": "translation", "part_of_speech": "Dosi"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + word = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "word": word, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_word(**req_copy) + + def test_get_word_value_error_with_retries(self): + # Enable retries and run test_get_word_value_error. + _service.enable_retries() + self.test_get_word_value_error() + + # Disable retries and run test_get_word_value_error. + _service.disable_retries() + self.test_get_word_value_error() + + +class TestDeleteWord: + """ + Test Class for delete_word + """ + + @responses.activate + def test_delete_word_all_params(self): + """ + delete_word() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + customization_id = 'testString' + word = 'testString' + + # Invoke method + response = _service.delete_word( + customization_id, + word, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_word_all_params_with_retries(self): + # Enable retries and run test_delete_word_all_params. + _service.enable_retries() + self.test_delete_word_all_params() + + # Disable retries and run test_delete_word_all_params. + _service.disable_retries() + self.test_delete_word_all_params() + + @responses.activate + def test_delete_word_value_error(self): + """ + test_delete_word_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/words/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + customization_id = 'testString' + word = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "word": word, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_word(**req_copy) + + def test_delete_word_value_error_with_retries(self): + # Enable retries and run test_delete_word_value_error. + _service.enable_retries() + self.test_delete_word_value_error() + + # Disable retries and run test_delete_word_value_error. 
+ _service.disable_retries() + self.test_delete_word_value_error() + + +# endregion +############################################################################## +# End of Service: CustomWords +############################################################################## + +############################################################################## +# Start of Service: CustomPrompts +############################################################################## +# region + + +class TestListCustomPrompts: + """ + Test Class for list_custom_prompts + """ + + @responses.activate + def test_list_custom_prompts_all_params(self): + """ + list_custom_prompts() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/prompts') + mock_response = '{"prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Invoke method + response = _service.list_custom_prompts( + customization_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_custom_prompts_all_params_with_retries(self): + # Enable retries and run test_list_custom_prompts_all_params. + _service.enable_retries() + self.test_list_custom_prompts_all_params() + + # Disable retries and run test_list_custom_prompts_all_params. + _service.disable_retries() + self.test_list_custom_prompts_all_params() + + @responses.activate + def test_list_custom_prompts_value_error(self): + """ + test_list_custom_prompts_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/prompts') + mock_response = '{"prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.list_custom_prompts(**req_copy) + + def test_list_custom_prompts_value_error_with_retries(self): + # Enable retries and run test_list_custom_prompts_value_error. + _service.enable_retries() + self.test_list_custom_prompts_value_error() + + # Disable retries and run test_list_custom_prompts_value_error. 
+ _service.disable_retries() + self.test_list_custom_prompts_value_error() + + +class TestAddCustomPrompt: + """ + Test Class for add_custom_prompt + """ + + @responses.activate + def test_add_custom_prompt_all_params(self): + """ + add_custom_prompt() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/prompts/testString') + mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a PromptMetadata model + prompt_metadata_model = {} + prompt_metadata_model['prompt_text'] = 'testString' + prompt_metadata_model['speaker_id'] = 'testString' + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + metadata = prompt_metadata_model + file = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.add_custom_prompt( + customization_id, + prompt_id, + metadata, + file, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + + def test_add_custom_prompt_all_params_with_retries(self): + # Enable retries and run test_add_custom_prompt_all_params. + _service.enable_retries() + self.test_add_custom_prompt_all_params() + + # Disable retries and run test_add_custom_prompt_all_params. + _service.disable_retries() + self.test_add_custom_prompt_all_params() + + @responses.activate + def test_add_custom_prompt_value_error(self): + """ + test_add_custom_prompt_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/prompts/testString') + mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Construct a dict representation of a PromptMetadata model + prompt_metadata_model = {} + prompt_metadata_model['prompt_text'] = 'testString' + prompt_metadata_model['speaker_id'] = 'testString' + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + metadata = prompt_metadata_model + file = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "prompt_id": prompt_id, + "metadata": metadata, + "file": file, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.add_custom_prompt(**req_copy) + + def test_add_custom_prompt_value_error_with_retries(self): + # Enable retries and run test_add_custom_prompt_value_error. + _service.enable_retries() + self.test_add_custom_prompt_value_error() + + # Disable retries and run test_add_custom_prompt_value_error. 
+ _service.disable_retries() + self.test_add_custom_prompt_value_error() + + +class TestGetCustomPrompt: + """ + Test Class for get_custom_prompt + """ + + @responses.activate + def test_get_custom_prompt_all_params(self): + """ + get_custom_prompt() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/prompts/testString') + mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + + # Invoke method + response = _service.get_custom_prompt( + customization_id, + prompt_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_custom_prompt_all_params_with_retries(self): + # Enable retries and run test_get_custom_prompt_all_params. + _service.enable_retries() + self.test_get_custom_prompt_all_params() + + # Disable retries and run test_get_custom_prompt_all_params. + _service.disable_retries() + self.test_get_custom_prompt_all_params() + + @responses.activate + def test_get_custom_prompt_value_error(self): + """ + test_get_custom_prompt_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/prompts/testString') + mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "prompt_id": prompt_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_custom_prompt(**req_copy) + + def test_get_custom_prompt_value_error_with_retries(self): + # Enable retries and run test_get_custom_prompt_value_error. + _service.enable_retries() + self.test_get_custom_prompt_value_error() + + # Disable retries and run test_get_custom_prompt_value_error. + _service.disable_retries() + self.test_get_custom_prompt_value_error() + + +class TestDeleteCustomPrompt: + """ + Test Class for delete_custom_prompt + """ + + @responses.activate + def test_delete_custom_prompt_all_params(self): + """ + delete_custom_prompt() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/prompts/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + + # Invoke method + response = _service.delete_custom_prompt( + customization_id, + prompt_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_custom_prompt_all_params_with_retries(self): + # Enable retries and run test_delete_custom_prompt_all_params. + _service.enable_retries() + self.test_delete_custom_prompt_all_params() + + # Disable retries and run test_delete_custom_prompt_all_params. 
+ _service.disable_retries() + self.test_delete_custom_prompt_all_params() + + @responses.activate + def test_delete_custom_prompt_value_error(self): + """ + test_delete_custom_prompt_value_error() + """ + # Set up mock + url = preprocess_url('/v1/customizations/testString/prompts/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + customization_id = 'testString' + prompt_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customization_id": customization_id, + "prompt_id": prompt_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_custom_prompt(**req_copy) + + def test_delete_custom_prompt_value_error_with_retries(self): + # Enable retries and run test_delete_custom_prompt_value_error. + _service.enable_retries() + self.test_delete_custom_prompt_value_error() + + # Disable retries and run test_delete_custom_prompt_value_error. + _service.disable_retries() + self.test_delete_custom_prompt_value_error() + + +# endregion +############################################################################## +# End of Service: CustomPrompts +############################################################################## + +############################################################################## +# Start of Service: SpeakerModels +############################################################################## +# region + + +class TestListSpeakerModels: + """ + Test Class for list_speaker_models + """ + + @responses.activate + def test_list_speaker_models_all_params(self): + """ + list_speaker_models() + """ + # Set up mock + url = preprocess_url('/v1/speakers') + mock_response = '{"speakers": [{"speaker_id": "speaker_id", "name": "name"}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Invoke method + response = _service.list_speaker_models() + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_list_speaker_models_all_params_with_retries(self): + # Enable retries and run test_list_speaker_models_all_params. + _service.enable_retries() + self.test_list_speaker_models_all_params() + + # Disable retries and run test_list_speaker_models_all_params. 
+ _service.disable_retries() + self.test_list_speaker_models_all_params() + + +class TestCreateSpeakerModel: + """ + Test Class for create_speaker_model + """ + + @responses.activate + def test_create_speaker_model_all_params(self): + """ + create_speaker_model() + """ + # Set up mock + url = preprocess_url('/v1/speakers') + mock_response = '{"speaker_id": "speaker_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + speaker_name = 'testString' + audio = io.BytesIO(b'This is a mock file.').getvalue() + + # Invoke method + response = _service.create_speaker_model( + speaker_name, + audio, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 201 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'speaker_name={}'.format(speaker_name) in query_string + # Validate body params + assert responses.calls[0].request.body == audio + + def test_create_speaker_model_all_params_with_retries(self): + # Enable retries and run test_create_speaker_model_all_params. + _service.enable_retries() + self.test_create_speaker_model_all_params() + + # Disable retries and run test_create_speaker_model_all_params. + _service.disable_retries() + self.test_create_speaker_model_all_params() + + @responses.activate + def test_create_speaker_model_value_error(self): + """ + test_create_speaker_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/speakers') + mock_response = '{"speaker_id": "speaker_id"}' + responses.add( + responses.POST, + url, + body=mock_response, + content_type='application/json', + status=201, + ) + + # Set up parameter values + speaker_name = 'testString' + audio = io.BytesIO(b'This is a mock file.').getvalue() + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "speaker_name": speaker_name, + "audio": audio, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.create_speaker_model(**req_copy) + + def test_create_speaker_model_value_error_with_retries(self): + # Enable retries and run test_create_speaker_model_value_error. + _service.enable_retries() + self.test_create_speaker_model_value_error() + + # Disable retries and run test_create_speaker_model_value_error. 
+ _service.disable_retries() + self.test_create_speaker_model_value_error() + + +class TestGetSpeakerModel: + """ + Test Class for get_speaker_model + """ + + @responses.activate + def test_get_speaker_model_all_params(self): + """ + get_speaker_model() + """ + # Set up mock + url = preprocess_url('/v1/speakers/testString') + mock_response = '{"customizations": [{"customization_id": "customization_id", "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error"}]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + speaker_id = 'testString' + + # Invoke method + response = _service.get_speaker_model( + speaker_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + + def test_get_speaker_model_all_params_with_retries(self): + # Enable retries and run test_get_speaker_model_all_params. + _service.enable_retries() + self.test_get_speaker_model_all_params() + + # Disable retries and run test_get_speaker_model_all_params. + _service.disable_retries() + self.test_get_speaker_model_all_params() + + @responses.activate + def test_get_speaker_model_value_error(self): + """ + test_get_speaker_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/speakers/testString') + mock_response = '{"customizations": [{"customization_id": "customization_id", "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error"}]}]}' + responses.add( + responses.GET, + url, + body=mock_response, + content_type='application/json', + status=200, + ) + + # Set up parameter values + speaker_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "speaker_id": speaker_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.get_speaker_model(**req_copy) + + def test_get_speaker_model_value_error_with_retries(self): + # Enable retries and run test_get_speaker_model_value_error. + _service.enable_retries() + self.test_get_speaker_model_value_error() + + # Disable retries and run test_get_speaker_model_value_error. + _service.disable_retries() + self.test_get_speaker_model_value_error() + + +class TestDeleteSpeakerModel: + """ + Test Class for delete_speaker_model + """ + + @responses.activate + def test_delete_speaker_model_all_params(self): + """ + delete_speaker_model() + """ + # Set up mock + url = preprocess_url('/v1/speakers/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + speaker_id = 'testString' + + # Invoke method + response = _service.delete_speaker_model( + speaker_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 204 + + def test_delete_speaker_model_all_params_with_retries(self): + # Enable retries and run test_delete_speaker_model_all_params. + _service.enable_retries() + self.test_delete_speaker_model_all_params() + + # Disable retries and run test_delete_speaker_model_all_params. 
+ _service.disable_retries() + self.test_delete_speaker_model_all_params() + + @responses.activate + def test_delete_speaker_model_value_error(self): + """ + test_delete_speaker_model_value_error() + """ + # Set up mock + url = preprocess_url('/v1/speakers/testString') + responses.add( + responses.DELETE, + url, + status=204, + ) + + # Set up parameter values + speaker_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "speaker_id": speaker_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_speaker_model(**req_copy) + + def test_delete_speaker_model_value_error_with_retries(self): + # Enable retries and run test_delete_speaker_model_value_error. + _service.enable_retries() + self.test_delete_speaker_model_value_error() + + # Disable retries and run test_delete_speaker_model_value_error. + _service.disable_retries() + self.test_delete_speaker_model_value_error() + + +# endregion +############################################################################## +# End of Service: SpeakerModels +############################################################################## + +############################################################################## +# Start of Service: UserData +############################################################################## +# region + + +class TestDeleteUserData: + """ + Test Class for delete_user_data + """ + + @responses.activate + def test_delete_user_data_all_params(self): + """ + delete_user_data() + """ + # Set up mock + url = preprocess_url('/v1/user_data') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customer_id = 'testString' + + # Invoke method + response = _service.delete_user_data( + customer_id, + headers={}, + ) + + # Check for correct operation + assert len(responses.calls) == 1 + assert response.status_code == 200 + # Validate query params + query_string = responses.calls[0].request.url.split('?', 1)[1] + query_string = urllib.parse.unquote_plus(query_string) + assert 'customer_id={}'.format(customer_id) in query_string + + def test_delete_user_data_all_params_with_retries(self): + # Enable retries and run test_delete_user_data_all_params. + _service.enable_retries() + self.test_delete_user_data_all_params() + + # Disable retries and run test_delete_user_data_all_params. + _service.disable_retries() + self.test_delete_user_data_all_params() + + @responses.activate + def test_delete_user_data_value_error(self): + """ + test_delete_user_data_value_error() + """ + # Set up mock + url = preprocess_url('/v1/user_data') + responses.add( + responses.DELETE, + url, + status=200, + ) + + # Set up parameter values + customer_id = 'testString' + + # Pass in all but one required param and check for a ValueError + req_param_dict = { + "customer_id": customer_id, + } + for param in req_param_dict.keys(): + req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()} + with pytest.raises(ValueError): + _service.delete_user_data(**req_copy) + + def test_delete_user_data_value_error_with_retries(self): + # Enable retries and run test_delete_user_data_value_error. + _service.enable_retries() + self.test_delete_user_data_value_error() + + # Disable retries and run test_delete_user_data_value_error. 
+ _service.disable_retries() + self.test_delete_user_data_value_error() + + +# endregion +############################################################################## +# End of Service: UserData +############################################################################## + + +############################################################################## +# Start of Model Tests +############################################################################## +# region + + +class TestModel_CustomModel: + """ + Test Class for CustomModel + """ + + def test_custom_model_serialization(self): + """ + Test serialization/deserialization for CustomModel + """ + + # Construct dict forms of any model objects needed in order to build this model. + + word_model = {} # Word + word_model['word'] = 'testString' + word_model['translation'] = 'testString' + word_model['part_of_speech'] = 'Dosi' + + prompt_model = {} # Prompt + prompt_model['prompt'] = 'testString' + prompt_model['prompt_id'] = 'testString' + prompt_model['status'] = 'testString' + prompt_model['error'] = 'testString' + prompt_model['speaker_id'] = 'testString' + + # Construct a json representation of a CustomModel model + custom_model_model_json = {} + custom_model_model_json['customization_id'] = 'testString' + custom_model_model_json['name'] = 'testString' + custom_model_model_json['language'] = 'testString' + custom_model_model_json['owner'] = 'testString' + custom_model_model_json['created'] = 'testString' + custom_model_model_json['last_modified'] = 'testString' + custom_model_model_json['description'] = 'testString' + custom_model_model_json['words'] = [word_model] + custom_model_model_json['prompts'] = [prompt_model] + + # Construct a model instance of CustomModel by calling from_dict on the json representation + custom_model_model = CustomModel.from_dict(custom_model_model_json) + assert custom_model_model != False + + # Construct a model instance of CustomModel by calling from_dict on the json representation + custom_model_model_dict = CustomModel.from_dict(custom_model_model_json).__dict__ + custom_model_model2 = CustomModel(**custom_model_model_dict) + + # Verify the model instances are equivalent + assert custom_model_model == custom_model_model2 + + # Convert model instance back to dict and verify no loss of data + custom_model_model_json2 = custom_model_model.to_dict() + assert custom_model_model_json2 == custom_model_model_json + + +class TestModel_CustomModels: + """ + Test Class for CustomModels + """ + + def test_custom_models_serialization(self): + """ + Test serialization/deserialization for CustomModels + """ + + # Construct dict forms of any model objects needed in order to build this model. 
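+ # CustomModels wraps a list of CustomModel entries, so Word, Prompt, and CustomModel
+ # dicts are built first; they mirror the JSON shape the service itself would return.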
+ + word_model = {} # Word + word_model['word'] = 'testString' + word_model['translation'] = 'testString' + word_model['part_of_speech'] = 'Dosi' + + prompt_model = {} # Prompt + prompt_model['prompt'] = 'testString' + prompt_model['prompt_id'] = 'testString' + prompt_model['status'] = 'testString' + prompt_model['error'] = 'testString' + prompt_model['speaker_id'] = 'testString' + + custom_model_model = {} # CustomModel + custom_model_model['customization_id'] = 'testString' + custom_model_model['name'] = 'testString' + custom_model_model['language'] = 'testString' + custom_model_model['owner'] = 'testString' + custom_model_model['created'] = 'testString' + custom_model_model['last_modified'] = 'testString' + custom_model_model['description'] = 'testString' + custom_model_model['words'] = [word_model] + custom_model_model['prompts'] = [prompt_model] + + # Construct a json representation of a CustomModels model + custom_models_model_json = {} + custom_models_model_json['customizations'] = [custom_model_model] + + # Construct a model instance of CustomModels by calling from_dict on the json representation + custom_models_model = CustomModels.from_dict(custom_models_model_json) + assert custom_models_model != False + + # Construct a model instance of CustomModels by calling from_dict on the json representation + custom_models_model_dict = CustomModels.from_dict(custom_models_model_json).__dict__ + custom_models_model2 = CustomModels(**custom_models_model_dict) + + # Verify the model instances are equivalent + assert custom_models_model == custom_models_model2 + + # Convert model instance back to dict and verify no loss of data + custom_models_model_json2 = custom_models_model.to_dict() + assert custom_models_model_json2 == custom_models_model_json + + +class TestModel_Prompt: + """ + Test Class for Prompt + """ + + def test_prompt_serialization(self): + """ + Test serialization/deserialization for Prompt + """ + + # Construct a json representation of a Prompt model + prompt_model_json = {} + prompt_model_json['prompt'] = 'testString' + prompt_model_json['prompt_id'] = 'testString' + prompt_model_json['status'] = 'testString' + prompt_model_json['error'] = 'testString' + prompt_model_json['speaker_id'] = 'testString' + + # Construct a model instance of Prompt by calling from_dict on the json representation + prompt_model = Prompt.from_dict(prompt_model_json) + assert prompt_model != False + + # Construct a model instance of Prompt by calling from_dict on the json representation + prompt_model_dict = Prompt.from_dict(prompt_model_json).__dict__ + prompt_model2 = Prompt(**prompt_model_dict) + + # Verify the model instances are equivalent + assert prompt_model == prompt_model2 + + # Convert model instance back to dict and verify no loss of data + prompt_model_json2 = prompt_model.to_dict() + assert prompt_model_json2 == prompt_model_json + + +class TestModel_PromptMetadata: + """ + Test Class for PromptMetadata + """ + + def test_prompt_metadata_serialization(self): + """ + Test serialization/deserialization for PromptMetadata + """ + + # Construct a json representation of a PromptMetadata model + prompt_metadata_model_json = {} + prompt_metadata_model_json['prompt_text'] = 'testString' + prompt_metadata_model_json['speaker_id'] = 'testString' + + # Construct a model instance of PromptMetadata by calling from_dict on the json representation + prompt_metadata_model = PromptMetadata.from_dict(prompt_metadata_model_json) + assert prompt_metadata_model != False + + # Construct a model instance of 
PromptMetadata by calling from_dict on the json representation + prompt_metadata_model_dict = PromptMetadata.from_dict(prompt_metadata_model_json).__dict__ + prompt_metadata_model2 = PromptMetadata(**prompt_metadata_model_dict) + + # Verify the model instances are equivalent + assert prompt_metadata_model == prompt_metadata_model2 + + # Convert model instance back to dict and verify no loss of data + prompt_metadata_model_json2 = prompt_metadata_model.to_dict() + assert prompt_metadata_model_json2 == prompt_metadata_model_json + + +class TestModel_Prompts: + """ + Test Class for Prompts + """ + + def test_prompts_serialization(self): + """ + Test serialization/deserialization for Prompts + """ + + # Construct dict forms of any model objects needed in order to build this model. + + prompt_model = {} # Prompt + prompt_model['prompt'] = 'testString' + prompt_model['prompt_id'] = 'testString' + prompt_model['status'] = 'testString' + prompt_model['error'] = 'testString' + prompt_model['speaker_id'] = 'testString' + + # Construct a json representation of a Prompts model + prompts_model_json = {} + prompts_model_json['prompts'] = [prompt_model] + + # Construct a model instance of Prompts by calling from_dict on the json representation + prompts_model = Prompts.from_dict(prompts_model_json) + assert prompts_model != False + + # Construct a model instance of Prompts by calling from_dict on the json representation + prompts_model_dict = Prompts.from_dict(prompts_model_json).__dict__ + prompts_model2 = Prompts(**prompts_model_dict) + + # Verify the model instances are equivalent + assert prompts_model == prompts_model2 + + # Convert model instance back to dict and verify no loss of data + prompts_model_json2 = prompts_model.to_dict() + assert prompts_model_json2 == prompts_model_json + + +class TestModel_Pronunciation: + """ + Test Class for Pronunciation + """ + + def test_pronunciation_serialization(self): + """ + Test serialization/deserialization for Pronunciation + """ + + # Construct a json representation of a Pronunciation model + pronunciation_model_json = {} + pronunciation_model_json['pronunciation'] = 'testString' + + # Construct a model instance of Pronunciation by calling from_dict on the json representation + pronunciation_model = Pronunciation.from_dict(pronunciation_model_json) + assert pronunciation_model != False + + # Construct a model instance of Pronunciation by calling from_dict on the json representation + pronunciation_model_dict = Pronunciation.from_dict(pronunciation_model_json).__dict__ + pronunciation_model2 = Pronunciation(**pronunciation_model_dict) + + # Verify the model instances are equivalent + assert pronunciation_model == pronunciation_model2 + + # Convert model instance back to dict and verify no loss of data + pronunciation_model_json2 = pronunciation_model.to_dict() + assert pronunciation_model_json2 == pronunciation_model_json + + +class TestModel_Speaker: + """ + Test Class for Speaker + """ + + def test_speaker_serialization(self): + """ + Test serialization/deserialization for Speaker + """ + + # Construct a json representation of a Speaker model + speaker_model_json = {} + speaker_model_json['speaker_id'] = 'testString' + speaker_model_json['name'] = 'testString' + + # Construct a model instance of Speaker by calling from_dict on the json representation + speaker_model = Speaker.from_dict(speaker_model_json) + assert speaker_model != False + + # Construct a model instance of Speaker by calling from_dict on the json representation + speaker_model_dict = 
Speaker.from_dict(speaker_model_json).__dict__ + speaker_model2 = Speaker(**speaker_model_dict) + + # Verify the model instances are equivalent + assert speaker_model == speaker_model2 + + # Convert model instance back to dict and verify no loss of data + speaker_model_json2 = speaker_model.to_dict() + assert speaker_model_json2 == speaker_model_json + + +class TestModel_SpeakerCustomModel: + """ + Test Class for SpeakerCustomModel + """ + + def test_speaker_custom_model_serialization(self): + """ + Test serialization/deserialization for SpeakerCustomModel + """ + + # Construct dict forms of any model objects needed in order to build this model. + + speaker_prompt_model = {} # SpeakerPrompt + speaker_prompt_model['prompt'] = 'testString' + speaker_prompt_model['prompt_id'] = 'testString' + speaker_prompt_model['status'] = 'testString' + speaker_prompt_model['error'] = 'testString' + + # Construct a json representation of a SpeakerCustomModel model + speaker_custom_model_model_json = {} + speaker_custom_model_model_json['customization_id'] = 'testString' + speaker_custom_model_model_json['prompts'] = [speaker_prompt_model] + + # Construct a model instance of SpeakerCustomModel by calling from_dict on the json representation + speaker_custom_model_model = SpeakerCustomModel.from_dict(speaker_custom_model_model_json) + assert speaker_custom_model_model != False + + # Construct a model instance of SpeakerCustomModel by calling from_dict on the json representation + speaker_custom_model_model_dict = SpeakerCustomModel.from_dict(speaker_custom_model_model_json).__dict__ + speaker_custom_model_model2 = SpeakerCustomModel(**speaker_custom_model_model_dict) + + # Verify the model instances are equivalent + assert speaker_custom_model_model == speaker_custom_model_model2 + + # Convert model instance back to dict and verify no loss of data + speaker_custom_model_model_json2 = speaker_custom_model_model.to_dict() + assert speaker_custom_model_model_json2 == speaker_custom_model_model_json + + +class TestModel_SpeakerCustomModels: + """ + Test Class for SpeakerCustomModels + """ + + def test_speaker_custom_models_serialization(self): + """ + Test serialization/deserialization for SpeakerCustomModels + """ + + # Construct dict forms of any model objects needed in order to build this model. 
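+ # SpeakerCustomModels nests SpeakerCustomModel entries, each carrying a list of
+ # SpeakerPrompt dicts, so both are assembled before the top-level 'customizations' list.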
+ + speaker_prompt_model = {} # SpeakerPrompt + speaker_prompt_model['prompt'] = 'testString' + speaker_prompt_model['prompt_id'] = 'testString' + speaker_prompt_model['status'] = 'testString' + speaker_prompt_model['error'] = 'testString' + + speaker_custom_model_model = {} # SpeakerCustomModel + speaker_custom_model_model['customization_id'] = 'testString' + speaker_custom_model_model['prompts'] = [speaker_prompt_model] + + # Construct a json representation of a SpeakerCustomModels model + speaker_custom_models_model_json = {} + speaker_custom_models_model_json['customizations'] = [speaker_custom_model_model] + + # Construct a model instance of SpeakerCustomModels by calling from_dict on the json representation + speaker_custom_models_model = SpeakerCustomModels.from_dict(speaker_custom_models_model_json) + assert speaker_custom_models_model != False + + # Construct a model instance of SpeakerCustomModels by calling from_dict on the json representation + speaker_custom_models_model_dict = SpeakerCustomModels.from_dict(speaker_custom_models_model_json).__dict__ + speaker_custom_models_model2 = SpeakerCustomModels(**speaker_custom_models_model_dict) + + # Verify the model instances are equivalent + assert speaker_custom_models_model == speaker_custom_models_model2 + + # Convert model instance back to dict and verify no loss of data + speaker_custom_models_model_json2 = speaker_custom_models_model.to_dict() + assert speaker_custom_models_model_json2 == speaker_custom_models_model_json + + +class TestModel_SpeakerModel: + """ + Test Class for SpeakerModel + """ + + def test_speaker_model_serialization(self): + """ + Test serialization/deserialization for SpeakerModel + """ + + # Construct a json representation of a SpeakerModel model + speaker_model_model_json = {} + speaker_model_model_json['speaker_id'] = 'testString' + + # Construct a model instance of SpeakerModel by calling from_dict on the json representation + speaker_model_model = SpeakerModel.from_dict(speaker_model_model_json) + assert speaker_model_model != False + + # Construct a model instance of SpeakerModel by calling from_dict on the json representation + speaker_model_model_dict = SpeakerModel.from_dict(speaker_model_model_json).__dict__ + speaker_model_model2 = SpeakerModel(**speaker_model_model_dict) + + # Verify the model instances are equivalent + assert speaker_model_model == speaker_model_model2 + + # Convert model instance back to dict and verify no loss of data + speaker_model_model_json2 = speaker_model_model.to_dict() + assert speaker_model_model_json2 == speaker_model_model_json + + +class TestModel_SpeakerPrompt: + """ + Test Class for SpeakerPrompt + """ + + def test_speaker_prompt_serialization(self): + """ + Test serialization/deserialization for SpeakerPrompt + """ + + # Construct a json representation of a SpeakerPrompt model + speaker_prompt_model_json = {} + speaker_prompt_model_json['prompt'] = 'testString' + speaker_prompt_model_json['prompt_id'] = 'testString' + speaker_prompt_model_json['status'] = 'testString' + speaker_prompt_model_json['error'] = 'testString' + + # Construct a model instance of SpeakerPrompt by calling from_dict on the json representation + speaker_prompt_model = SpeakerPrompt.from_dict(speaker_prompt_model_json) + assert speaker_prompt_model != False + + # Construct a model instance of SpeakerPrompt by calling from_dict on the json representation + speaker_prompt_model_dict = SpeakerPrompt.from_dict(speaker_prompt_model_json).__dict__ + speaker_prompt_model2 = 
SpeakerPrompt(**speaker_prompt_model_dict) + + # Verify the model instances are equivalent + assert speaker_prompt_model == speaker_prompt_model2 + + # Convert model instance back to dict and verify no loss of data + speaker_prompt_model_json2 = speaker_prompt_model.to_dict() + assert speaker_prompt_model_json2 == speaker_prompt_model_json + + +class TestModel_Speakers: + """ + Test Class for Speakers + """ + + def test_speakers_serialization(self): + """ + Test serialization/deserialization for Speakers + """ + + # Construct dict forms of any model objects needed in order to build this model. + + speaker_model = {} # Speaker + speaker_model['speaker_id'] = 'testString' + speaker_model['name'] = 'testString' + + # Construct a json representation of a Speakers model + speakers_model_json = {} + speakers_model_json['speakers'] = [speaker_model] + + # Construct a model instance of Speakers by calling from_dict on the json representation + speakers_model = Speakers.from_dict(speakers_model_json) + assert speakers_model != False + + # Construct a model instance of Speakers by calling from_dict on the json representation + speakers_model_dict = Speakers.from_dict(speakers_model_json).__dict__ + speakers_model2 = Speakers(**speakers_model_dict) + + # Verify the model instances are equivalent + assert speakers_model == speakers_model2 + + # Convert model instance back to dict and verify no loss of data + speakers_model_json2 = speakers_model.to_dict() + assert speakers_model_json2 == speakers_model_json + + +class TestModel_SupportedFeatures: + """ + Test Class for SupportedFeatures + """ + + def test_supported_features_serialization(self): + """ + Test serialization/deserialization for SupportedFeatures + """ + + # Construct a json representation of a SupportedFeatures model + supported_features_model_json = {} + supported_features_model_json['custom_pronunciation'] = True + supported_features_model_json['voice_transformation'] = True + + # Construct a model instance of SupportedFeatures by calling from_dict on the json representation + supported_features_model = SupportedFeatures.from_dict(supported_features_model_json) + assert supported_features_model != False + + # Construct a model instance of SupportedFeatures by calling from_dict on the json representation + supported_features_model_dict = SupportedFeatures.from_dict(supported_features_model_json).__dict__ + supported_features_model2 = SupportedFeatures(**supported_features_model_dict) + + # Verify the model instances are equivalent + assert supported_features_model == supported_features_model2 + + # Convert model instance back to dict and verify no loss of data + supported_features_model_json2 = supported_features_model.to_dict() + assert supported_features_model_json2 == supported_features_model_json + + +class TestModel_Translation: + """ + Test Class for Translation + """ + + def test_translation_serialization(self): + """ + Test serialization/deserialization for Translation + """ + + # Construct a json representation of a Translation model + translation_model_json = {} + translation_model_json['translation'] = 'testString' + translation_model_json['part_of_speech'] = 'Dosi' + + # Construct a model instance of Translation by calling from_dict on the json representation + translation_model = Translation.from_dict(translation_model_json) + assert translation_model != False + + # Construct a model instance of Translation by calling from_dict on the json representation + translation_model_dict = 
Translation.from_dict(translation_model_json).__dict__ + translation_model2 = Translation(**translation_model_dict) + + # Verify the model instances are equivalent + assert translation_model == translation_model2 + + # Convert model instance back to dict and verify no loss of data + translation_model_json2 = translation_model.to_dict() + assert translation_model_json2 == translation_model_json + + +class TestModel_Voice: + """ + Test Class for Voice + """ + + def test_voice_serialization(self): + """ + Test serialization/deserialization for Voice + """ + + # Construct dict forms of any model objects needed in order to build this model. + + supported_features_model = {} # SupportedFeatures + supported_features_model['custom_pronunciation'] = True + supported_features_model['voice_transformation'] = True + + word_model = {} # Word + word_model['word'] = 'testString' + word_model['translation'] = 'testString' + word_model['part_of_speech'] = 'Dosi' + + prompt_model = {} # Prompt + prompt_model['prompt'] = 'testString' + prompt_model['prompt_id'] = 'testString' + prompt_model['status'] = 'testString' + prompt_model['error'] = 'testString' + prompt_model['speaker_id'] = 'testString' + + custom_model_model = {} # CustomModel + custom_model_model['customization_id'] = 'testString' + custom_model_model['name'] = 'testString' + custom_model_model['language'] = 'testString' + custom_model_model['owner'] = 'testString' + custom_model_model['created'] = 'testString' + custom_model_model['last_modified'] = 'testString' + custom_model_model['description'] = 'testString' + custom_model_model['words'] = [word_model] + custom_model_model['prompts'] = [prompt_model] + + # Construct a json representation of a Voice model + voice_model_json = {} + voice_model_json['url'] = 'testString' + voice_model_json['gender'] = 'testString' + voice_model_json['name'] = 'testString' + voice_model_json['language'] = 'testString' + voice_model_json['description'] = 'testString' + voice_model_json['customizable'] = True + voice_model_json['supported_features'] = supported_features_model + voice_model_json['customization'] = custom_model_model + + # Construct a model instance of Voice by calling from_dict on the json representation + voice_model = Voice.from_dict(voice_model_json) + assert voice_model != False + + # Construct a model instance of Voice by calling from_dict on the json representation + voice_model_dict = Voice.from_dict(voice_model_json).__dict__ + voice_model2 = Voice(**voice_model_dict) + + # Verify the model instances are equivalent + assert voice_model == voice_model2 + + # Convert model instance back to dict and verify no loss of data + voice_model_json2 = voice_model.to_dict() + assert voice_model_json2 == voice_model_json + + +class TestModel_Voices: + """ + Test Class for Voices + """ + + def test_voices_serialization(self): + """ + Test serialization/deserialization for Voices + """ + + # Construct dict forms of any model objects needed in order to build this model. 
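+ # Voices is the deepest structure exercised here: each Voice nests SupportedFeatures and
+ # a CustomModel, which in turn nests Word and Prompt dicts.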
+ + supported_features_model = {} # SupportedFeatures + supported_features_model['custom_pronunciation'] = True + supported_features_model['voice_transformation'] = True + + word_model = {} # Word + word_model['word'] = 'testString' + word_model['translation'] = 'testString' + word_model['part_of_speech'] = 'Dosi' + + prompt_model = {} # Prompt + prompt_model['prompt'] = 'testString' + prompt_model['prompt_id'] = 'testString' + prompt_model['status'] = 'testString' + prompt_model['error'] = 'testString' + prompt_model['speaker_id'] = 'testString' + + custom_model_model = {} # CustomModel + custom_model_model['customization_id'] = 'testString' + custom_model_model['name'] = 'testString' + custom_model_model['language'] = 'testString' + custom_model_model['owner'] = 'testString' + custom_model_model['created'] = 'testString' + custom_model_model['last_modified'] = 'testString' + custom_model_model['description'] = 'testString' + custom_model_model['words'] = [word_model] + custom_model_model['prompts'] = [prompt_model] + + voice_model = {} # Voice + voice_model['url'] = 'testString' + voice_model['gender'] = 'testString' + voice_model['name'] = 'testString' + voice_model['language'] = 'testString' + voice_model['description'] = 'testString' + voice_model['customizable'] = True + voice_model['supported_features'] = supported_features_model + voice_model['customization'] = custom_model_model + + # Construct a json representation of a Voices model + voices_model_json = {} + voices_model_json['voices'] = [voice_model] + + # Construct a model instance of Voices by calling from_dict on the json representation + voices_model = Voices.from_dict(voices_model_json) + assert voices_model != False + + # Construct a model instance of Voices by calling from_dict on the json representation + voices_model_dict = Voices.from_dict(voices_model_json).__dict__ + voices_model2 = Voices(**voices_model_dict) + + # Verify the model instances are equivalent + assert voices_model == voices_model2 + + # Convert model instance back to dict and verify no loss of data + voices_model_json2 = voices_model.to_dict() + assert voices_model_json2 == voices_model_json + + +class TestModel_Word: + """ + Test Class for Word + """ + + def test_word_serialization(self): + """ + Test serialization/deserialization for Word + """ + + # Construct a json representation of a Word model + word_model_json = {} + word_model_json['word'] = 'testString' + word_model_json['translation'] = 'testString' + word_model_json['part_of_speech'] = 'Dosi' + + # Construct a model instance of Word by calling from_dict on the json representation + word_model = Word.from_dict(word_model_json) + assert word_model != False + + # Construct a model instance of Word by calling from_dict on the json representation + word_model_dict = Word.from_dict(word_model_json).__dict__ + word_model2 = Word(**word_model_dict) + + # Verify the model instances are equivalent + assert word_model == word_model2 + + # Convert model instance back to dict and verify no loss of data + word_model_json2 = word_model.to_dict() + assert word_model_json2 == word_model_json + + +class TestModel_Words: + """ + Test Class for Words + """ + + def test_words_serialization(self): + """ + Test serialization/deserialization for Words + """ + + # Construct dict forms of any model objects needed in order to build this model. 
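+ # Words only wraps a list of Word dicts, so a single nested Word is sufficient.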
+ + word_model = {} # Word + word_model['word'] = 'testString' + word_model['translation'] = 'testString' + word_model['part_of_speech'] = 'Dosi' + + # Construct a json representation of a Words model + words_model_json = {} + words_model_json['words'] = [word_model] + + # Construct a model instance of Words by calling from_dict on the json representation + words_model = Words.from_dict(words_model_json) + assert words_model != False + + # Construct a model instance of Words by calling from_dict on the json representation + words_model_dict = Words.from_dict(words_model_json).__dict__ + words_model2 = Words(**words_model_dict) + + # Verify the model instances are equivalent + assert words_model == words_model2 + + # Convert model instance back to dict and verify no loss of data + words_model_json2 = words_model.to_dict() + assert words_model_json2 == words_model_json -@responses.activate -def test_success(): - voices_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/voices' - voices_response = { - "voices": [{ - "url": - "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEnUsLisa", - "gender": - "female", - "name": - "VoiceEnUsLisa", - "language": - "en-US" - }, { - "url": - "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEsEsEnrique", - "gender": - "male", - "name": - "VoiceEsEsEnrique", - "language": - "es-ES" - }, { - "url": - "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEnUsMichael", - "gender": - "male", - "name": - "VoiceEnUsMichael", - "language": - "en-US" - }, { - "url": - "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEnUsAllison", - "gender": - "female", - "name": - "VoiceEnUsAllison", - "language": - "en-US" - }] - } - voice_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/voices/en-us_AllisonVoice' - voice_response = { - "url": - "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/en-US_AllisonVoice", - "name": - "en-US_AllisonVoice", - "language": - "en-US", - "customizable": - True, - "gender": - "female", - "description": - "Allison: American English female voice.", - "supported_features": { - "custom_pronunciation": True, - "voice_transformation": True - } - } - synthesize_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/synthesize' - synthesize_response_body = '' - - responses.add( - responses.GET, - voices_url, - body=json.dumps(voices_response), - status=200, - content_type='application/json') - responses.add( - responses.GET, - voice_url, - body=json.dumps(voice_response), - status=200, - content_type='application/json') - responses.add( - responses.POST, - synthesize_url, - body=synthesize_response_body, - status=200, - content_type='application/json', - match_querystring=True) - - text_to_speech = ibm_watson.TextToSpeechV1( - username="username", password="password") - - text_to_speech.list_voices() - assert responses.calls[0].request.url == voices_url - assert responses.calls[0].response.text == json.dumps(voices_response) - - text_to_speech.get_voice('en-us_AllisonVoice') - assert responses.calls[1].request.url == voice_url - assert responses.calls[1].response.text == json.dumps(voice_response) - - text_to_speech.synthesize('hello') - assert responses.calls[2].request.url == synthesize_url - assert responses.calls[2].response.text == synthesize_response_body - - assert len(responses.calls) == 3 - - -@responses.activate -def test_get_pronunciation(): - - responses.add( - responses.GET, - 
'https://stream.watsonplatform.net/text-to-speech/api/v1/pronunciation', - body='{"pronunciation": "pronunciation info" }', - status=200, - content_type='application_json') - - text_to_speech = ibm_watson.TextToSpeechV1( - username="username", password="password") - - text_to_speech.get_pronunciation(text="this is some text") - text_to_speech.get_pronunciation(text="yo", voice="VoiceEnUsLisa") - text_to_speech.get_pronunciation( - text="yo", voice="VoiceEnUsLisa", format='ipa') - - assert len(responses.calls) == 3 - - -@responses.activate -def test_custom_voice_models(): - responses.add( - responses.GET, - 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations', - body='{"customizations": "yep" }', - status=200, - content_type='application_json') - responses.add( - responses.POST, - 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations', - body='{"customizations": "yep" }', - status=200, - content_type='application_json') - responses.add( - responses.GET, - 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid', - body='{"customization": "yep, just one" }', - status=200, - content_type='application_json') - responses.add( - responses.POST, - 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid', - body='{"customizations": "yep" }', - status=200, - content_type='application_json') - responses.add( - responses.DELETE, - 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid', - body='{"customizations": "yep" }', - status=200, - content_type='application_json') - - text_to_speech = ibm_watson.TextToSpeechV1( - username="username", password="password") - text_to_speech.list_voice_models() - text_to_speech.list_voice_models(language="en-US") - assert len(responses.calls) == 2 - - text_to_speech.create_voice_model(name="name", description="description") - text_to_speech.get_voice_model(customization_id='custid') - text_to_speech.update_voice_model( - customization_id="custid", name="name", description="description") - text_to_speech.delete_voice_model(customization_id="custid") - - assert len(responses.calls) == 6 - - -@responses.activate -def test_custom_words(): - base_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations' - responses.add( - responses.GET, - "{0}/{1}/words".format(base_url, "custid"), - body='{"customizations": "yep" }', - status=200, - content_type='application_json') - responses.add( - responses.POST, - "{0}/{1}/words".format(base_url, "custid"), - body='{"customizations": "yep" }', - status=200, - content_type='application_json') - responses.add( - responses.GET, - "{0}/{1}/words/{2}".format(base_url, "custid", "word"), - body='{"customization": "yep, just one" }', - status=200, - content_type='application_json') - responses.add( - responses.POST, - "{0}/{1}/words/{2}".format(base_url, "custid", "word"), - body='{"customizations": "yep" }', - status=200, - content_type='application_json') - responses.add( - responses.PUT, - "{0}/{1}/words/{2}".format(base_url, "custid", "word"), - body='{"customizations": "yep" }', - status=200, - content_type='application_json') - responses.add( - responses.DELETE, - "{0}/{1}/words/{2}".format(base_url, "custid", "word"), - body='{"customizations": "yep" }', - status=200, - content_type='application_json') - - text_to_speech = ibm_watson.TextToSpeechV1( - username="username", password="password") - - text_to_speech.list_words(customization_id="custid") - text_to_speech.add_words( - customization_id="custid", 
words=[{"word": "one", "translation": "one"}, {"word": "two", "translation": "two"}]) - text_to_speech.get_word(customization_id="custid", word="word") - text_to_speech.add_word( - customization_id='custid', word="word", translation="I'm translated") - text_to_speech.delete_word(customization_id="custid", word="word") - - assert len(responses.calls) == 5 - -@responses.activate - -def test_delete_user_data(): - url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/user_data' - responses.add( - responses.DELETE, - url, - body='{"description": "success" }', - status=204, - content_type='application_json') - - text_to_speech = ibm_watson.TextToSpeechV1(username="username", password="password") - response = text_to_speech.delete_user_data('id').get_result() - assert response is None - assert len(responses.calls) == 1 +# endregion +############################################################################## +# End of Model Tests +############################################################################## diff --git a/test/unit/test_tone_analyzer_v3.py b/test/unit/test_tone_analyzer_v3.py deleted file mode 100755 index d0ae45ff2..000000000 --- a/test/unit/test_tone_analyzer_v3.py +++ /dev/null @@ -1,143 +0,0 @@ -# coding: utf-8 -import responses -import ibm_watson -from ibm_watson import ApiException -import os -import json - - -@responses.activate -# Simple test, just calling tone() with some text -def test_tone(): - tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone' - tone_args = '?version=2016-05-19' - tone_response = None - with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect1.json')) as response_json: - tone_response = response_json.read() - - responses.add(responses.POST, tone_url, - body=tone_response, status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as tone_text: - tone_analyzer = ibm_watson.ToneAnalyzerV3("2016-05-19", - username="username", - password="password") - tone_analyzer.tone(tone_text.read(), content_type='application/json') - - assert responses.calls[0].request.url == tone_url + tone_args - assert responses.calls[0].response.text == tone_response - - assert len(responses.calls) == 1 - - -@responses.activate -# Invoking tone() with some modifiers given in 'params': sentences skipped -def test_tone_with_args(): - tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone' - tone_args = {'version': '2016-05-19', 'sentences': 'false'} - tone_response = None - with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect1.json')) as response_json: - tone_response = response_json.read() - - responses.add(responses.POST, tone_url, - body=tone_response, status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as tone_text: - tone_analyzer = ibm_watson.ToneAnalyzerV3("2016-05-19", username="username", password="password") - tone_analyzer.tone(tone_text.read(), content_type='application/json', sentences=False) - - assert responses.calls[0].request.url.split('?')[0] == tone_url - # Compare args. Order is not deterministic! 
- actualArgs = {} - for arg in responses.calls[0].request.url.split('?')[1].split('&'): - actualArgs[arg.split('=')[0]] = arg.split('=')[1] - assert actualArgs == tone_args - assert responses.calls[0].response.text == tone_response - assert len(responses.calls) == 1 - - -@responses.activate -# Invoking tone() with some modifiers specified as positional parameters: sentences is false -def test_tone_with_positional_args(): - tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone' - tone_args = {'version': '2016-05-19', 'sentences': 'false'} - tone_response = None - with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect1.json')) as response_json: - tone_response = response_json.read() - - responses.add(responses.POST, tone_url, - body=tone_response, status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as tone_text: - tone_analyzer = ibm_watson.ToneAnalyzerV3("2016-05-19", username="username", password="password") - tone_analyzer.tone(tone_text.read(), content_type='application/json', sentences=False) - - assert responses.calls[0].request.url.split('?')[0] == tone_url - # Compare args. Order is not deterministic! - actualArgs = {} - for arg in responses.calls[0].request.url.split('?')[1].split('&'): - actualArgs[arg.split('=')[0]] = arg.split('=')[1] - assert actualArgs == tone_args - assert responses.calls[0].response.text == tone_response - assert len(responses.calls) == 1 - - -@responses.activate -# Invoking tone_chat() -def test_tone_chat(): - tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone_chat' - tone_args = '?version=2016-05-19' - tone_response = None - with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect2.json')) as response_json: - tone_response = response_json.read() - - responses.add(responses.POST, tone_url, - body=tone_response, status=200, - content_type='application/json') - - tone_analyzer = ibm_watson.ToneAnalyzerV3("2016-05-19", - username="username", - password="password") - utterances = [{'text': 'I am very happy', 'user': 'glenn'}] - tone_analyzer.tone_chat(utterances) - - assert responses.calls[0].request.url == tone_url + tone_args - assert responses.calls[0].response.text == tone_response - assert len(responses.calls) == 1 - - -######################### -# error response -######################### - - -@responses.activate -def test_error(): - tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone' - error_code = 400 - error_message = "Invalid JSON input at line 2, column 12" - tone_response = { - "code": error_code, - "sub_code": "C00012", - "error": error_message - } - responses.add(responses.POST, - tone_url, - body=json.dumps(tone_response), - status=error_code, - content_type='application/json') - - tone_analyzer = ibm_watson.ToneAnalyzerV3('2016-05-19', - username='username', - password='password') - text = 'Team, I know that times are tough!' 
- try: - tone_analyzer.tone(text, 'application/json') - except ApiException as ex: - assert len(responses.calls) == 1 - assert isinstance(ex, ApiException) - assert ex.code == error_code - assert ex.message == error_message diff --git a/test/unit/test_visual_recognition_v3.py b/test/unit/test_visual_recognition_v3.py deleted file mode 100644 index d81de2ba1..000000000 --- a/test/unit/test_visual_recognition_v3.py +++ /dev/null @@ -1,262 +0,0 @@ -# coding: utf-8 -import responses -import ibm_watson -import json -import os - -from unittest import TestCase - -base_url = "https://gateway.watsonplatform.net/visual-recognition/api/" - -class TestVisualRecognitionV3(TestCase): - @classmethod - def setUp(cls): - iam_url = "https://iam.cloud.ibm.com/identity/token" - iam_token_response = """{ - "access_token": "oAeisG8yqPY7sFR_x66Z15", - "token_type": "Bearer", - "expires_in": 3600, - "expiration": 1524167011, - "refresh_token": "jy4gl91BQ" - }""" - responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200) - - @responses.activate - def test_get_classifier(self): - vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey') - - gc_url = "{0}{1}".format(base_url, 'v3/classifiers/bogusnumber') - - response = { - "classifier_id": "bogusnumber", - "name": "Dog Breeds", - "owner": "58b61352-678c-44d1-9f40-40edf4ea8d19", - "status": "failed", - "created": "2017-08-25T06:39:01.968Z", - "classes": [{"class": "goldenretriever"}] - } - - responses.add(responses.GET, - gc_url, - body=json.dumps(response), - status=200, - content_type='application/json') - vr_service.get_classifier(classifier_id='bogusnumber') - - assert len(responses.calls) == 2 - - @responses.activate - def test_delete_classifier(self): - vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey') - - gc_url = "{0}{1}".format(base_url, 'v3/classifiers/bogusnumber') - - responses.add(responses.DELETE, - gc_url, - body=json.dumps({'response': 200}), - status=200, - content_type='application/json') - vr_service.delete_classifier(classifier_id='bogusnumber') - - assert len(responses.calls) == 2 - - @responses.activate - def test_list_classifiers(self): - vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey') - - gc_url = "{0}{1}".format(base_url, 'v3/classifiers') - - response = {"classifiers": [ - { - "classifier_id": "InsuranceClaims_1362331461", - "name": "Insurance Claims", - "status": "ready" - }, - { - "classifier_id": "DogBreeds_1539707331", - "name": "Dog Breeds", - "status": "ready" - } - ]} - - responses.add(responses.GET, - gc_url, - body=json.dumps(response), - status=200, - content_type='application/json') - vr_service.list_classifiers() - - assert len(responses.calls) == 2 - - @responses.activate - def test_create_classifier(self): - vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey') - - gc_url = "{0}{1}".format(base_url, 'v3/classifiers') - - response = { - "classifier_id": "DogBreeds_2014254824", - "name": "Dog Breeds", - "owner": "58b61352-678c-44d1-9f40-40edf4ea8d19", - "status": "failed", - "created": "2017-08-25T06:39:01.968Z", - "classes": [{"class": "goldenretriever"}] - } - - responses.add(responses.POST, - gc_url, - body=json.dumps(response), - status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/cars.zip'), 'rb') as cars, \ - open(os.path.join(os.path.dirname(__file__), '../../resources/trucks.zip'), 'rb') as trucks: - 
vr_service.create_classifier('Cars vs Trucks', positive_examples={'cars': cars}, negative_examples=trucks) - - assert len(responses.calls) == 2 - - @responses.activate - def test_update_classifier(self): - vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey') - - gc_url = "{0}{1}".format(base_url, 'v3/classifiers/bogusid') - - response = { - "classifier_id": "bogusid", - "name": "Insurance Claims", - "owner": "58b61352-678c-44d1-9f40-40edf4ea8d19", - "status": "ready", - "created": "2017-07-17T22:17:14.860Z", - "classes": [ - {"class": "motorcycleaccident"}, - {"class": "flattire"}, - {"class": "brokenwinshield"} - ] - } - - responses.add(responses.POST, - gc_url, - body=json.dumps(response), - status=200, - content_type='application/json') - - vr_service.update_classifier(classifier_id="bogusid") - assert len(responses.calls) == 2 - - @responses.activate - def test_classify(self): - vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey') - - gc_url = "{0}{1}".format(base_url, 'v3/classify') - - response = {"images": [ - {"image": "test.jpg", - "classifiers": [ - {"classes": [ - {"score": 0.95, "class": "tiger", "type_hierarchy": "/animal/mammal/carnivore/feline/big cat/tiger"}, - {"score": 0.997, "class": "big cat"}, - {"score": 0.998, "class": "feline"}, - {"score": 0.998, "class": "carnivore"}, - {"score": 0.998, "class": "mammal"}, - {"score": 0.999, "class": "animal"} - ], - "classifier_id": "default", - "name": "default"} - ] - } - ], - "custom_classes": 0, - "images_processed": 1 - } - - responses.add(responses.GET, - gc_url, - body=json.dumps(response), - status=200, - content_type='application/json') - responses.add(responses.POST, - gc_url, - body=json.dumps(response), - status=200, - content_type='application/json') - - vr_service.classify(parameters='{"url": "http://google.com"}') - - vr_service.classify(parameters=json.dumps({'url': 'http://google.com', 'classifier_ids': ['one', 'two', 'three']})) - vr_service.classify(parameters=json.dumps({'url': 'http://google.com', 'owners': ['me', 'IBM']})) - - with open(os.path.join(os.path.dirname(__file__), '../../resources/test.jpg'), 'rb') as image_file: - vr_service.classify(images_file=image_file) - assert len(responses.calls) == 8 - - @responses.activate - def test_detect_faces(self): - vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey') - - gc_url = "{0}{1}".format(base_url, 'v3/detect_faces') - - response = { - "images": [ - { - "faces": [ - { - "age": { - "max": 44, - "min": 35, - "score": 0.446989 - }, - "face_location": { - "height": 159, - "left": 256, - "top": 64, - "width": 92 - }, - "gender": { - "gender": "MALE", - "score": 0.99593 - }, - "identity": { - "name": "Barack Obama", - "score": 0.970688, - "type_hierarchy": "/people/politicians/democrats/barack obama" - } - } - ], - "resolved_url": "https://watson-developer-cloud.github.io/doc-tutorial-downloads/visual-recognition/prez.jpg", - "source_url": "https://watson-developer-cloud.github.io/doc-tutorial-downloads/visual-recognition/prez.jpg" - } - ], - "images_processed": 1 - } - - responses.add(responses.GET, - gc_url, - body=json.dumps(response), - status=200, - content_type='application/json') - - responses.add(responses.POST, - gc_url, - body=json.dumps(response), - status=200, - content_type='application/json') - - vr_service.detect_faces(parameters='{"url": "http://google.com"}') - with open(os.path.join(os.path.dirname(__file__), '../../resources/test.jpg'), 'rb') as 
image_file: - vr_service.detect_faces(images_file=image_file) - assert len(responses.calls) == 4 - - @responses.activate - def test_delete_user_data(self): - url = "{0}{1}".format(base_url, 'v3/user_data') - responses.add( - responses.DELETE, - url, - body='{"description": "success" }', - status=204, - content_type='application_json') - - vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey') - response = vr_service.delete_user_data('id').get_result() - assert response is None - assert len(responses.calls) == 2 diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 1f0396717..000000000 --- a/tox.ini +++ /dev/null @@ -1,18 +0,0 @@ -[tox] -envlist = lint, py27, py35, py36, py37 - -[testenv:lint] -basepython = python3.7 -deps = pylint -commands = pylint ibm_watson test examples - -[testenv] -passenv = TOXENV CI TRAVIS* -commands = - py.test --reruns 3 --cov=ibm_watson - codecov -e TOXENV -deps = - -r{toxinidir}/requirements.txt - -r{toxinidir}/requirements-dev.txt -usedevelop = True -exclude = .venv,.git,.tox,docs